prompt (stringlengths 76–399k) | completion (stringlengths 7–146) | api (stringlengths 10–61) |
---|---|---|
"""
Additional tests for MonkeyArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import monkey as mk
import monkey._testing as tm
from monkey.arrays import MonkeyArray
from monkey.core.arrays.numpy_ import MonkeyDtype
@pytest.fixture(
params=[
np.array(["a", "b"], dtype=object),
np.array([0, 1], dtype=float),
np.array([0, 1], dtype=int),
np.array([0, 1 + 2j], dtype=complex),
np.array([True, False], dtype=bool),
np.array([0, 1], dtype="datetime64[ns]"),
np.array([0, 1], dtype="timedelta64[ns]"),
]
)
def whatever_numpy_array(request):
"""
Parametrized fixture for NumPy arrays with different dtypes.
This excludes string and bytes.
"""
return request.param
# ----------------------------------------------------------------------------
# MonkeyDtype
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", True),
("uint", True),
("float", True),
("complex", True),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_numeric(dtype, expected):
dtype = MonkeyDtype(dtype)
assert dtype._is_numeric is expected
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", False),
("uint", False),
("float", False),
("complex", False),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_boolean(dtype, expected):
dtype = MonkeyDtype(dtype)
assert dtype._is_boolean is expected
def test_repr():
dtype = MonkeyDtype(np.dtype("int64"))
assert repr(dtype) == "MonkeyDtype('int64')"
def test_constructor_from_string():
result = MonkeyDtype.construct_from_string("int64")
expected = MonkeyDtype(np.dtype("int64"))
assert result == expected
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
with pytest.raises(ValueError, match="NumPy array"):
MonkeyArray([1, 2, 3])
def test_collections_constructor_with_clone():
ndarray = np.array([1, 2, 3])
ser = mk.Collections(MonkeyArray(ndarray), clone=True)
assert ser.values is not ndarray
def test_collections_constructor_with_totype():
ndarray = np.array([1, 2, 3])
result = mk.Collections(MonkeyArray(ndarray), dtype="float64")
expected = mk.Collections([1.0, 2.0, 3.0], dtype="float64")
tm.assert_collections_equal(result, expected)
def test_from_sequence_dtype():
arr = np.array([1, 2, 3], dtype="int64")
result = MonkeyArray._from_sequence(arr, dtype="uint64")
expected = MonkeyArray(np.array([1, 2, 3], dtype="uint64"))
tm.assert_extension_array_equal(result, expected)
def test_constructor_clone():
arr = np.array([0, 1])
result = MonkeyArray(arr, clone=True)
assert np.shares_memory(result._ndarray, arr) is False
def test_constructor_with_data(whatever_numpy_array):
nparr = whatever_numpy_array
arr = MonkeyArray(nparr)
assert arr.dtype.numpy_dtype == nparr.dtype
# ----------------------------------------------------------------------------
# Conversion
def test_to_numpy():
arr = MonkeyArray(np.array([1, 2, 3]))
result = arr.to_numpy()
assert result is arr._ndarray
result = arr.to_numpy(clone=True)
assert result is not arr._ndarray
result = arr.to_numpy(dtype="f8")
expected = np.array([1, 2, 3], dtype="f8")
tm.assert_numpy_array_equal(result, expected)
# ----------------------------------------------------------------------------
# Setitem
def test_setitem_collections():
ser = mk.Collections([1, 2, 3])
ser.array[0] = 10
expected = mk.Collections([10, 2, 3])
tm.assert_collections_equal(ser, expected)
def test_setitem(whatever_numpy_array):
nparr = whatever_numpy_array
arr = | MonkeyArray(nparr, clone=True) | pandas.arrays.PandasArray |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
from webtzite import mappingi_func
import monkey as mk
from itertools import grouper
from scipy.optimize import brentq
from webtzite.connector import ConnectorBase
from mpcontribs.rest.views import Connector
from mpcontribs.users.redox_thermo_csp.rest.energy_analysis import EnergyAnalysis as enera
from mpcontribs.users.redox_thermo_csp.rest.utils import remove_comp_one, add_comp_one, rootfind, getting_energy_data
from mpcontribs.users.redox_thermo_csp.rest.utils import s_th_o, dh_ds, funciso, funciso_redox, isobar_line_elling
from mpcontribs.users.redox_thermo_csp.rest.utils import funciso_theo, funciso_redox_theo, d_h_num_dev_calc, d_s_fundamental
ConnectorBase.register(Connector)
def init_isographs(request, db_type, cid, mdb):
try:
contrib = mdb.contrib_ad.query_contributions(
{'_id': cid}, projection={'_id': 0, 'content.pars': 1, 'content.data': 1})[0]
pars = contrib['content']['pars']
pars['compstr_disp'] = remove_comp_one(pars['theo_compstr']) # for user display
if pars['compstr_disp'] == pars['theo_compstr']:
pars['theo_compstr'] = add_comp_one(pars['theo_compstr']) # compstr must contain '1' such as in "Sr1Fe1Ox"
pars['compstr_disp'] = [''.join(g) for _, g in grouper(str(pars['compstr_disp']), str.isalpha)]
pars['experimental_data_available'] = pars.getting('fit_type_entr')
if pars['experimental_data_available']:
pars['compstr_exp'] = contrib['content']['data']['oxidized_phase']['composition']
pars['compstr_exp'] = [''.join(g) for _, g in grouper(str(pars['compstr_exp']), str.isalpha)]
else:
pars['compstr_exp'] = "n.a."
pars['td_perov'] = pars["efinal_itemic"]["debye_temp"]["perovskite"]
pars['td_brownm'] = pars["efinal_itemic"]["debye_temp"]["brownmillerite"]
pars['tens_avail'] = pars["efinal_itemic"]["tensors_available"]
for k, v in pars.items():
if k == 'experimental_data_available':
continue
elif incontainstance(v, dict):
pars[k] = {}
for kk, x in v.items():
try:
pars[k][kk] = float(x)
except:
continue
elif not v[0].isalpha():
try:
pars[k] = float(v)
except:
continue
a, b = 1e-10, 0.5-1e-10 # limiting values for non-stoichiometry delta in brentq
response, payload = {}, {}
plottype = request.path.split("/")[-1]
if request.method == 'GET':
if plottype == "isotherm":
payload['iso'] = 800.
payload['rng'] = [-5, 1]
elif plottype == "isobar":
payload['iso'] = -5
payload['rng'] = [600, 1000]
elif plottype == "isoredox":
payload['iso'] = 0.3
payload['rng'] = [700, 1000]
elif plottype == "ellingham":
payload['iso'] = 0.
payload['rng'] = [700, 1000]
else: # dH or dS
payload['iso'] = 500.
elif request.method == 'POST':
payload = json.loads(request.body)
payload['iso'] = float(payload['iso'])
if payload.getting('rng'):
payload['rng'] = mapping(float, payload['rng'].split(","))
if plottype == "isotherm": # pressure on the x-axis
x_val = mk.np.log(mk.np.logspace(payload['rng'][0], payload['rng'][1], num=100))
elif not payload.getting('rng'): # dH or dS # delta on the x-axis
x_val = mk.np.linspace(0.01, 0.49, num=100)
else: # temperature on the x-axis
x_val = mk.np.linspace(payload['rng'][0], payload['rng'][1], num=100)
except Exception as ex:
        raise ValueError('REST Error: "{}"'.formating(str(ex)))
return pars, a, b, response, payload, x_val
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isotherm(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(payload['iso'])
args = (xv, payload['iso'], pars, s_th)
solutioniso = rootfind(a, b, args, funciso)
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (xv, payload['iso'], pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = rootfind(a, b, args_theo, funciso_theo)
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(mk.np.exp(x_val))
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
        raise ValueError('REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isobar(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(xv)
args = (payload['iso'], xv, pars, s_th)
solutioniso = rootfind(a, b, args, funciso)
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = rootfind(a, b, args_theo, funciso_theo)
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
        raise ValueError('REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isoredox(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(xv)
args = (payload['iso'], xv, pars, s_th)
solutioniso = brentq(funciso_redox, -300, 300, args=args)
resiso.adding(mk.np.exp(solutioniso))
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
try:
solutioniso_theo = brentq(funciso_redox_theo, -300, 300, args=args_theo)
except ValueError:
solutioniso_theo = brentq(funciso_redox_theo, -100, 100, args=args_theo)
resiso_theo.adding(mk.np.exp(solutioniso_theo))
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
        raise ValueError('REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def enthalpy_dH(request, cid, db_type=None, mdb=None):
try:
pars, _, _, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(payload['iso'])
args = (payload['iso'], xv, pars, s_th)
solutioniso = dh_ds(xv, args[-1], args[-2])[0] / 1000
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = d_h_num_dev_calc(delta=xv, dh_1=pars["dh_getting_min"], dh_2=pars["dh_getting_max"],
temp=payload['iso'], act=pars["act_mat"]) / 1000
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
if getting_max(mk.np.adding(resiso, resiso_theo)) > (pars['dh_getting_max'] * 0.0015): # limiting values for the plot
y_getting_max = pars['dh_getting_max'] * 0.0015
else:
y_getting_max = getting_max(mk.np.adding(resiso, resiso_theo))*1.2
if getting_min(mk.np.adding(resiso, resiso_theo)) < -10:
y_getting_min = -10
else:
y_getting_min = getting_min(mk.np.adding(resiso, resiso_theo)) * 0.8
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", \
'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [y_getting_min,y_getting_max],
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
        raise ValueError('REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def entropy_dS(request, cid, db_type=None, mdb=None):
try:
pars, _, _, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(payload['iso'])
args = (payload['iso'], xv, pars, s_th)
solutioniso = dh_ds(xv, args[-1], args[-2])[1]
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = d_s_fundamental(delta=xv, dh_1=pars["dh_getting_min"], dh_2=pars["dh_getting_max"], temp=payload['iso'],
act=pars["act_mat"], t_d_perov=pars['td_perov'], t_d_brownm=pars['td_brownm'])
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
y_getting_min = -10 # limiting values for the plot
if getting_max( | mk.np.adding(resiso, resiso_theo) | pandas.np.append |
import DataModel
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
import math
from math import floor
class PlotModel:
    """
    This class implements methods for visualizing the DataModel model.
    """
    def __init__(self, process):
        """
        :param process: instance of the class "ProcessSimulation"
        _pkf holds the result of the PDF calculation
        _ckf holds the result of the CDF calculation
        """
self._process = process
self._pkf = None
self._ckf = None
    def show_realization(self, start=0, end=100):
        """
        Show a realization of the process on the interval from "start" to "end".
        :param start: left border of the interval
        :param end: right border of the interval
        :return: None; the plot is shown
        """
n = end - start
old_values = self._process.getting_data().getting_times()[start:end]
old_times = self._process.getting_data().getting_values()[start:end]
        # build duplicated value/time lists so the realization is drawn as a step plot
        values = []
        times = []
for i in range(0, n):
values.adding(old_values[i])
values.adding(old_values[i])
times.adding(old_times[0])
for i in range(1, n):
times.adding(old_times[i])
times.adding(old_times[i])
times.adding(old_times[-1])
threshold_time_interval = [old_times[0], times[-1]]
plt.plot(values, times)
plt.plot(threshold_time_interval, [self._process.getting_threshold()] * 2)
print(old_times[end-1])
plt.show()
def calculate_pkf(self, number_of_splits):
times = mk.Collections(self._process.getting_data().getting_times())
values = mk.Collections(self._process.getting_data().getting_values())
total_sum_of_time_intervals = mk.Collections(np.zeros((number_of_splits, )))
steps = np.zeros((number_of_splits, ))
getting_max_value = np.getting_max(values)
getting_min_value = np.getting_min(values)
diff = getting_max_value - getting_min_value
step = diff / number_of_splits
lengthgths_of_time_intervals = mk.Collections(
np.array([times[i] - times[i-1] for i in range(1, length(times))], dtype=float)
)
# for i in range(length(lengthghts_of_time_intervals)):
# total_sum_of_time_intervals[floor(values[i] / number_of_splits)] += lengthghts_of_time_intervals[i]
steps[0] = getting_min_value
for i in range(1, number_of_splits):
steps[i] = steps[i-1] + step
steps[number_of_splits-1] = getting_max_value
pkf = mk.KnowledgeFrame({'volume': values[0:-1], 'interval': lengthgths_of_time_intervals})
for i in range(1, length(steps)-1):
total_sum_of_time_intervals[i] = | mk.Collections.total_sum(pkf[(pkf.volume > steps[i]) & (pkf.volume <= steps[i+1])].interval) | pandas.Series.sum |
import numpy as np
import pytest
from monkey._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
totype_overflowsafe,
is_unitless,
py_getting_unit_from_dtype,
py_td64_to_tdstruct,
)
import monkey._testing as tm
def test_is_unitless():
dtype = np.dtype("M8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("datetime64")
assert is_unitless(dtype)
dtype = np.dtype("m8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("timedelta64")
assert is_unitless(dtype)
msg = "dtype must be datetime64 or timedelta64"
with pytest.raises(ValueError, match=msg):
is_unitless(np.dtype(np.int64))
msg = "Argument 'dtype' has incorrect type"
with pytest.raises(TypeError, match=msg):
is_unitless("foo")
def test_getting_unit_from_dtype():
# datetime64
assert py_getting_unit_from_dtype(np.dtype("M8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("M8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("M8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("M8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("M8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("M8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("M8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("M8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("M8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("M8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("M8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("M8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("M8[as]")) == 13
# timedelta64
assert py_getting_unit_from_dtype(np.dtype("m8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("m8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("m8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("m8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("m8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("m8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("m8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("m8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("m8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("m8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("m8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("m8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("m8[as]")) == 13
def test_td64_to_tdstruct():
val = 12454636234 # arbitrary value
res1 = py_td64_to_tdstruct(val, 10) # ns
exp1 = {
"days": 0,
"hrs": 0,
"getting_min": 0,
"sec": 12,
"ms": 454,
"us": 636,
"ns": 234,
"seconds": 12,
"microseconds": 454636,
"nanoseconds": 234,
}
assert res1 == exp1
res2 = py_td64_to_tdstruct(val, 9) # us
exp2 = {
"days": 0,
"hrs": 3,
"getting_min": 27,
"sec": 34,
"ms": 636,
"us": 234,
"ns": 0,
"seconds": 12454,
"microseconds": 636234,
"nanoseconds": 0,
}
assert res2 == exp2
res3 = py_td64_to_tdstruct(val, 8) # ms
exp3 = {
"days": 144,
"hrs": 3,
"getting_min": 37,
"sec": 16,
"ms": 234,
"us": 0,
"ns": 0,
"seconds": 13036,
"microseconds": 234000,
"nanoseconds": 0,
}
assert res3 == exp3
# Note this out of bounds for nanosecond Timedelta
res4 = py_td64_to_tdstruct(val, 7) # s
exp4 = {
"days": 144150,
"hrs": 21,
"getting_min": 10,
"sec": 34,
"ms": 0,
"us": 0,
"ns": 0,
"seconds": 76234,
"microseconds": 0,
"nanoseconds": 0,
}
assert res4 == exp4
class TestAstypeOverflowSafe:
def test_pass_non_dt64_array(self):
# check that we raise, not segfault
arr = np.arange(5)
dtype = np.dtype("M8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_pass_non_dt64_dtype(self):
# check that we raise, not segfault
arr = np.arange(5, dtype="i8").view("M8[D]")
dtype = np.dtype("m8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
| totype_overflowsafe(arr, dtype, clone=False) | pandas._libs.tslibs.np_datetime.astype_overflowsafe |
import monkey as mk
import networkx as nx
import numpy as np
import os
import random
'''
Main goal: build a labeled graph of movies and a knowledge-graph over the label classes.
~_~_~ Graph ~_~_~
Graph nodes: movies.
Graph edges: two movies are connected if a cast member plays in both of them.
Label: the genre of the movie. Multi-genre labels are treated as distinct classes; for example,
Drama-Comedy and Action-Comedy are different labels.
~_~_~ Knowledge-Graph ~_~_~
Knowledge-Graph nodes: classes, represented by genre combinations.
Knowledge-Graph edges: weighted by the Jaccard similarity (intersection over union) between the classes.
For example, the intersection of Drama-Comedy and Action-Comedy is Comedy (size 1)
and their union is Drama, Action, Comedy (size 3),
so there is an edge of weight 1/3 between those classes.
'''
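# Illustrative sketch (not part of the original module): how the Jaccard weight
# between two multi-genre class labels could be computed. The helper name
# `jaccard_weight` is hypothetical and simply restates the example above
# (Drama-Comedy vs. Action-Comedy -> 1/3).
def jaccard_weight(label_a, label_b):
    """Jaccard similarity (intersection over union) of labels like 'Drama-Comedy'."""
    genres_a = set(label_a.split('-'))
    genres_b = set(label_b.split('-'))
    union = genres_a | genres_b
    if not union:
        return 0.0
    return len(genres_a & genres_b) / float(len(union))
# Example: jaccard_weight('Drama-Comedy', 'Action-Comedy') == 1 / 3.0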
class DataCsvToGraph(object):
    """
    Class that reads and cleans the data.
    For the IMDb data set we download 2 csv files:
    IMDb movies.csv includes 81273 movies with attributes: title, year, genre, etc.
    IMDb title_principals.csv includes 38800 movies and 175715 cast names that play in those movies.
    """
def __init__(self, data_paths):
self.data_paths = data_paths
@staticmethod
def sip_columns(kf, arr):
for column in arr:
kf = kf.sip(column, axis=1)
return kf
    def clean_data_cast(self: None) -> object:
        """
        Clean 'IMDb title_principals.csv' data.
        :return: Data-Frame with cast ('imdb_name_id') and the movies ('imdb_title_id') they play in.
        """
if os.path.exists('pkl_e2v/data_cast_movie.pkl'):
data = mk.read_csv(self.data_paths['cast'])
clean_column = ['ordering', 'category', 'job', 'characters']
data = self.sip_columns(data, clean_column)
data = data.sort_the_values('imdb_name_id')
data = mk.KnowledgeFrame.sipna(data)
keys = data
keys = keys.sip('imdb_name_id', axis=1)
data = mk.read_pickle('pkl_e2v/data_cast_movie.pkl')
data['tmp'] = keys['imdb_title_id']
else:
data = mk.read_csv(self.data_paths['cast'])
clean_column = ['ordering', 'category', 'job', 'characters']
data = self.sip_columns(data, clean_column)
data = data.sort_the_values('imdb_name_id')
data = | mk.KnowledgeFrame.sipna(data) | pandas.DataFrame.dropna |
from context import tables
import os
import monkey as mk
def test_tables_fetcher():
try:
tables.fetcher()
tables_dir=os.listandardir(tables.TABLES_PATH)
print(f'\n----------------------------------\ntest_tables_fetcher worked,\ncontent of {tables.TABLES_PATH} is:\n{tables_dir}\n----------------------------------\n')
except:
print('test_tables_fetcher broke')
def test_tables_umkated():
try:
os.chdir(tables.TABLES_PATH)
ret=tables.umkated()
with open('log', 'r') as log:
date = log.read()
os.chdir(tables.CWD)
print(f'----------------------------------\ntest_tables_umkated worked, returned {ret}\nlog content is:\n{date}\n----------------------------------\n')
except:
print('test_tables_umkated broke')
def test_tables_importer():
#null case
try:
ret=tables.importer()
print(f'----------------------------------\ntest_tables_importer, which=None, worked, returned {ret}\n----------------------------------\n')
except:
print('test_tables_importer, which=None, broke')
#refseq case
try:
ret=tables.importer(which='refseq')
ret=mk.KnowledgeFrame.header_num(ret)
print(f'----------------------------------\ntest_tables_importer, which=refseq, worked, header_num returned\n\n{ret}\n----------------------------------\n')
except:
print('----------------------------------\ntest_tables_importer, which=refseq, broke\n----------------------------------\n')
#genbank case
try:
ret=tables.importer(which='genbank')
ret= | mk.KnowledgeFrame.header_num(ret) | pandas.DataFrame.head |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result = algos.incontain(s, s[0:2])
expected = np.zeros(length(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Collections(Categorical(1).from_codes(vals, cats))
St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.incontain(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.incontain(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_counts_value_num(self):
np.random.seed(1234)
from monkey.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert incontainstance(factor, n)
result = algos.counts_value_num(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).totype(CDT(ordered=True))
expected = Collections([1, 1, 1, 1], index=index)
tm.assert_collections_equal(result.sorting_index(), expected.sorting_index())
def test_counts_value_num_bins(self):
s = [1, 2, 3, 4]
result = algos.counts_value_num(s, bins=1)
expected = Collections([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_collections_equal(result, expected)
result = algos.counts_value_num(s, bins=2, sort=False)
expected = Collections([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_dtypes(self):
result = algos.counts_value_num([1, 1.])
assert length(result) == 1
result = algos.counts_value_num([1, 1.], bins=1)
assert length(result) == 1
result = algos.counts_value_num(Collections([1, 1., '1'])) # object
assert length(result) == 2
pytest.raises(TypeError, lambda s: algos.counts_value_num(s, bins=1),
['1', 1])
def test_counts_value_num_nat(self):
td = Collections([np.timedelta64(10000), mk.NaT], dtype='timedelta64[ns]')
dt = mk.convert_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.counts_value_num(s)
vc_with_na = algos.counts_value_num(s, sipna=False)
assert length(vc) == 1
assert length(vc_with_na) == 2
exp_dt = Collections({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_collections_equal(algos.counts_value_num(dt), exp_dt)
# TODO same for (timedelta)
def test_counts_value_num_datetime_outofbounds(self):
# GH 13663
s = Collections([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.counts_value_num()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Collections([3, 2, 1], index=exp_index)
tm.assert_collections_equal(res, exp)
# GH 12424
res = mk.convert_datetime(Collections(['2362-01-01', np.nan]),
errors='ignore')
exp = Collections(['2362-01-01', np.nan], dtype=object)
tm.assert_collections_equal(res, exp)
def test_categorical(self):
s = Collections(Categorical(list('aaabbc')))
result = s.counts_value_num()
expected = Collections([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.counts_value_num()
expected.index = expected.index.as_ordered()
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Collections(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_collections_equal(result, expected, check_index_type=True)
# out of order
s = Collections(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Collections(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.counts_value_num()
expected = Collections([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_sipna(self):
# https://github.com/monkey-dev/monkey/issues/9443#issuecomment-73719328
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=False),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=False),
Collections([2, 1, 1], index=[True, False, np.nan]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=False),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5., None]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Collections([10.3, 5., 5., None]).counts_value_num(sipna=False)
expected = Collections([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_collections_equal(result, expected)
def test_counts_value_num_normalized(self):
# GH12558
s = Collections([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.totype(t)
result = s_typed.counts_value_num(normalize=True, sipna=False)
expected = Collections([0.6, 0.2, 0.2],
index=Collections([np.nan, 2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
result = s_typed.counts_value_num(normalize=True, sipna=True)
expected = Collections([0.5, 0.5],
index=Collections([2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Collections([1], index=[2**63])
result = algos.counts_value_num(arr)
tm.assert_collections_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Collections([1, 1], index=[-1, 2**63])
result = algos.counts_value_num(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_collections_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_values_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated_values(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = | algos.duplicated_values(keys, keep='final_item') | pandas.core.algorithms.duplicated |
"""
Base and utility classes for monkey objects.
"""
import textwrap
import warnings
import numpy as np
import monkey._libs.lib as lib
import monkey.compat as compat
from monkey.compat import PYPY, OrderedDict, builtins, mapping, range
from monkey.compat.numpy import function as nv
from monkey.errors import AbstractMethodError
from monkey.util._decorators import Appender, Substitution, cache_readonly
from monkey.util._validators import validate_bool_kwarg
from monkey.core.dtypes.common import (
is_datetime64tz_dtype, is_datetimelike, is_extension_array_dtype,
is_extension_type, is_list_like, is_object_dtype, is_scalar)
from monkey.core.dtypes.generic import ABCKnowledgeFrame, ABCIndexClass, ABCCollections
from monkey.core.dtypes.missing import ifna
from monkey.core import algorithms, common as com
from monkey.core.accessor import DirNamesMixin
import monkey.core.nanops as nanops
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
distinctive='IndexOpsMixin', duplicated_values='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(kf) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from monkey.core.config import getting_option
encoding = getting_option("display.encoding")
return self.__unicode__().encode(encoding, 'replacing')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
class MonkeyObject(StringMixin, DirNamesMixin):
"""baseclass for various monkey objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if gettingattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Collections of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.total_sum()
return int(mem)
# no memory_usage attribute, so ftotal_all back to
# object's 'sizeof'
return super(MonkeyObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
ctotal_all to `self.__freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Collections.cat/.str/.dt`).
If you retotal_ally want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding whatever attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) gettingattr(self, key)
# because
# 1.) gettingattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if (gettingattr(self, "__frozen", False) and not
(key == "_cache" or
key in type(self).__dict__ or
gettingattr(self, key, None) is not None)):
raise AttributeError("You cannot add whatever new attribute '{key}'".
formating(key=key))
object.__setattr__(self, key, value)
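# Illustrative sketch (hypothetical subclass and attribute names): a subclass
# that freezes itself after construction accepts writes to existing attributes
# but rejects new ones.
#
#   class _Frozen(NoNewAttributesMixin):
#       def __init__(self):
#           self.existing = 1
#           self._freeze()
#
#   obj = _Frozen()
#   obj.existing = 2                           # fine: attribute already defined
#   obj.brand_new = 3                          # raises AttributeError
#   object.__setattr__(obj, 'brand_new', 3)    # escape hatch, as documented above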
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
    mixin implementing the selection & aggregation interface on a group-like
    object; sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = OrderedDict((
(builtins.total_sum, np.total_sum),
(builtins.getting_max, np.getting_max),
(builtins.getting_min, np.getting_min),
))
_cython_table = OrderedDict((
(builtins.total_sum, 'total_sum'),
(builtins.getting_max, 'getting_max'),
(builtins.getting_min, 'getting_min'),
(np.total_all, 'total_all'),
(np.whatever, 'whatever'),
(np.total_sum, 'total_sum'),
(np.nantotal_sum, 'total_sum'),
(np.average, 'average'),
(np.nanaverage, 'average'),
(np.prod, 'prod'),
(np.nanprod, 'prod'),
(np.standard, 'standard'),
(np.nanstandard, 'standard'),
(np.var, 'var'),
(np.nanvar, 'var'),
(np.median, 'median'),
(np.nanmedian, 'median'),
(np.getting_max, 'getting_max'),
(np.nangetting_max, 'getting_max'),
(np.getting_min, 'getting_min'),
(np.nangetting_min, 'getting_min'),
(np.cumprod, 'cumprod'),
(np.nancumprod, 'cumprod'),
(np.cumtotal_sum, 'cumtotal_sum'),
(np.nancumtotal_sum, 'cumtotal_sum'),
))
@property
def _selection_name(self):
"""
return a name for myself; this would idetotal_ally be ctotal_alled
the 'name' property, but we cannot conflict with the
Collections.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not incontainstance(self._selection, (list, tuple, ABCCollections,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or incontainstance(self.obj, ABCCollections):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and incontainstance(self.obj,
ABCKnowledgeFrame):
return self.obj.reindexing(columns=self._selection_list)
if length(self.exclusions) > 0:
return self.obj.sip(self.exclusions, axis=1)
else:
return self.obj
def __gettingitem__(self, key):
if self._selection is not None:
raise IndexError('Column(s) {selection} already selected'
.formating(selection=self._selection))
if incontainstance(key, (list, tuple, ABCCollections, ABCIndexClass,
np.ndarray)):
if length(self.obj.columns.interst(key)) != length(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: {missing}"
.formating(missing=str(bad_keys)[1:-1]))
return self._gotitem(list(key), ndim=2)
elif not gettingattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".formating(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".formating(key=key))
return self._gotitem(key, ndim=1)
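    # Illustrative sketch (hypothetical column names): for a KnowledgeFrame-backed
    # selection object ``g`` the routing above works out to
    #   g['A']            -> self._gotitem('A', ndim=1)   (ndim=2 when as_index=False)
    #   g[['A', 'B']]     -> self._gotitem(['A', 'B'], ndim=2)
    #   g[['A', 'oops']]  -> KeyError("Columns not found: 'oops'")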
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert incontainstance(arg, compat.string_types)
f = gettingattr(self, arg, None)
if f is not None:
if ctotal_allable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-ctotal_allable attribute
# but don't let them think they can pass args to it
assert length(args) == 0
assert length([kwarg for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = gettingattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{arg} is an unknown string function".formating(arg=arg))
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
        how can be a string describing the required post-processing, or
None if not required
"""
is_aggregator = lambda x: incontainstance(x, (list, tuple, dict))
is_nested_renagetting_mingr = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = gettingattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
if incontainstance(arg, compat.string_types):
return self._try_aggregate_string_function(arg, *args,
**kwargs), None
if incontainstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
def nested_renagetting_ming_depr(level=4):
# deprecation of nested renagetting_ming
# GH 15931
warnings.warn(
("using a dict with renagetting_ming "
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=level)
# if we have a dict of whatever non-scalars
# eg. {'A' : ['average']}, normalize total_all to
# be list-likes
if whatever(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not incontainstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renagetting_mingrs for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'average' }}
# {'A': { 'ra': ['average'] }}
# {'ra': ['average']}
# not ok
# {'ra' : { 'A' : 'average' }}
if incontainstance(v, dict):
is_nested_renagetting_mingr = True
if k not in obj.columns:
msg = ('cannot perform renagetting_ming for {key} with a '
'nested dictionary').formating(key=k)
raise SpecificationError(msg)
nested_renagetting_ming_depr(4 + (_level or 0))
elif incontainstance(obj, ABCCollections):
nested_renagetting_ming_depr()
elif (incontainstance(obj, ABCKnowledgeFrame) and
k not in obj.columns):
raise KeyError(
"Column '{col}' does not exist!".formating(col=k))
arg = new_arg
else:
# deprecation of renagetting_ming keys
# GH 15931
keys = list(compat.iterkeys(arg))
if (incontainstance(obj, ABCKnowledgeFrame) and
length(obj.columns.interst(keys)) != length(keys)):
nested_renagetting_ming_depr()
from monkey.core.reshape.concating import concating
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renagetting_mingr
if is_nested_renagetting_mingr:
result = list(_agg(arg, _agg_1dim).values())
if total_all(incontainstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.umkate(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Collections like object,
# but may have multiple aggregations
if length(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not length(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a KnowledgeFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting total_all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_whatever_collections():
# return a boolean if we have *whatever* nested collections
return whatever(incontainstance(r, ABCCollections)
for r in compat.itervalues(result))
def is_whatever_frame():
            # return a boolean if we have *whatever* nested frames
return whatever(incontainstance(r, ABCKnowledgeFrame)
for r in compat.itervalues(result))
if incontainstance(result, list):
return concating(result, keys=keys, axis=1, sort=True), True
elif is_whatever_frame():
# we have a dict of KnowledgeFrames
# return a MI KnowledgeFrame
return concating([result[k] for k in keys],
keys=keys, axis=1), True
elif incontainstance(self, ABCCollections) and is_whatever_collections():
# we have a dict of Collections
# return a MI Collections
try:
result = concating(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatictotal_ally broadcast
raise ValueError("cannot perform both aggregation "
"and transformatingion operations "
"simultaneously")
return result, True
# ftotal_all thru
from monkey import KnowledgeFrame, Collections
try:
result = KnowledgeFrame(result)
except ValueError:
# we have a dict of scalars
result = Collections(result,
name=gettingattr(self, 'name', None))
return result, True
elif is_list_like(arg) and arg not in compat.string_types:
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg,
_level=_level,
_axis=_axis), None
else:
result = None
f = self._is_cython_func(arg)
if f and not args and not kwargs:
return gettingattr(self, f)(), None
# ctotal_aller can react
return result, True
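    # Illustrative sketch of the dispatch above (hypothetical arguments):
    #   self._aggregate('average')           -> (string dispatch result, None)
    #   self._aggregate(['getting_min', 'getting_max'])  -> (_aggregate_multiple_funcs(...), None)
    #   self._aggregate({'col': 'total_sum'})      -> (per-column aggregation, True)
    #   self._aggregate(np.total_sum)              -> (self.total_sum(), None) via the cython table
    #   anything unrecognised              -> (None, True), so the caller can react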
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from monkey.core.reshape.concating import concating
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.adding(colg.aggregate(a))
# make sure we find a good name
name = com.getting_ctotal_allable_name(a) or a
keys.adding(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for index, col in enumerate(obj):
try:
colg = self._gotitem(col, ndim=1,
subset=obj.iloc[:, index])
results.adding(colg.aggregate(arg))
keys.adding(col)
except (TypeError, DataError):
pass
except ValueError:
# cannot aggregate
continue
except SpecificationError:
raise
# if we are empty
if not length(results):
raise ValueError("no results")
try:
return concating(results, keys=keys, axis=1, sort=False)
except TypeError:
# we are concatingting non-NDFrame objects,
# e.g. a list of scalars
from monkey.core.dtypes.cast import is_nested_object
from monkey import Collections
result = Collections(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
"aggregation operations")
return result
def _shtotal_allow_clone(self, obj=None, obj_type=None, **kwargs):
"""
return a new object with the replacingment attributes
"""
if obj is None:
obj = self._selected_obj.clone()
if obj_type is None:
obj_type = self._constructor
if incontainstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = gettingattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
"""
if we define an internal function for this argument, return it
"""
return self._cython_table.getting(arg)
def _is_builtin_func(self, arg):
"""
if we define an builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.getting(arg, arg)
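# Illustrative sketch: the two lookup tables above normalize builtin and numpy
# reductions before dispatch, e.g.
#   SelectionMixin._builtin_table[builtins.total_sum] is np.total_sum
#   SelectionMixin._cython_table[np.nanaverage] == 'average'
# so _is_builtin_func(builtins.total_sum) returns np.total_sum and
# _is_cython_func(np.nanaverage) returns 'average'.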
class IndexOpsMixin(object):
""" common ops mixin to support a unified interface / docs for Collections /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
"""
Return the transpose, which is by definition self.
"""
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="Return the transpose, which is by "
"definition self.")
@property
def _is_homogeneous_type(self):
"""
Whether the object has a single dtype.
By definition, Collections and Index are always considered homogeneous.
A MultiIndex may or may not be homogeneous, depending on the
dtypes of the levels.
See Also
--------
KnowledgeFrame._is_homogeneous_type
MultiIndex._is_homogeneous_type
"""
return True
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
return self._values.shape
@property
def ndim(self):
"""
Number of dimensions of the underlying data, by definition 1.
"""
return 1
def item(self):
"""
Return the first element of the underlying data as a python scalar.
"""
try:
return self.values.item()
except IndexError:
# clone numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
"""
Return the data pointer of the underlying data.
"""
warnings.warn("{obj}.data is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.data
@property
def itemsize(self):
"""
Return the size of the dtype of the item of the underlying data.
"""
warnings.warn("{obj}.itemsize is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.itemsize
@property
def nbytes(self):
"""
Return the number of bytes in the underlying data.
"""
return self._values.nbytes
@property
def strides(self):
"""
Return the strides of the underlying data.
"""
warnings.warn("{obj}.strides is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.strides
@property
def size(self):
"""
Return the number of elements in the underlying data.
"""
return self._values.size
@property
def flags(self):
"""
Return the ndarray.flags for the underlying data.
"""
warnings.warn("{obj}.flags is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.flags
@property
def base(self):
"""
Return the base object if the memory of the underlying data is shared.
"""
warnings.warn("{obj}.base is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.base
@property
def array(self):
# type: () -> Union[np.ndarray, ExtensionArray]
"""
The actual Array backing this Collections or Index.
.. versionadded:: 0.24.0
Returns
-------
array : numpy.ndarray or ExtensionArray
This is the actual array stored within this object. This differs
from ``.values`` which may require converting the data
to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Collections.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within monkey.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
datetime64[ns, tz] DatetimeArray
================== =============================
For whatever 3rd-party extension types, the array type will be an
ExtensionArray.
For total_all remaining dtypes ``.array`` will be the :class:`numpy.ndarray`
stored within. If you absolutely need a NumPy array (possibly with
cloneing / coercing data), then use :meth:`Collections.to_numpy` instead.
.. note::
``.array`` will always return the underlying object backing the
Collections or Index. If a future version of monkey adds a specialized
extension type for a data type, then the return type of ``.array``
for that data type will change from an object-dtype ndarray to the
new ExtensionArray.
Examples
--------
>>> ser = mk.Collections(mk.Categorical(['a', 'b', 'a']))
>>> ser.array
[a, b, a]
Categories (2, object): [a, b]
"""
return self._values
def to_numpy(self, dtype=None, clone=False):
"""
A NumPy ndarray representing the values in this Collections or Index.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
clone : bool, default False
            Whether to ensure that the returned value is not a view on
            another array. Note that ``clone=False`` does not *ensure* that
            ``to_numpy()`` is no-clone. Rather, ``clone=True`` ensures that
a clone is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Collections.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
KnowledgeFrame.to_numpy : Similar method for KnowledgeFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Collections,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Collections or Index (astotal_sugetting_ming ``clone=False``). Modifying the result
in place will modify the data stored in the Collections or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require cloneing data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-clone reference to the underlying data,
:attr:`Collections.array` should be used instead.
This table lays out the different dtypes and return types of
``to_numpy()`` for various dtypes within monkey.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = mk.Collections(mk.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of monkey :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = mk.Collections(mk.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is sipped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
"""
if (is_extension_array_dtype(self.dtype) or
is_datetime64tz_dtype(self.dtype)):
# TODO(DatetimeArray): remove the second clause.
# TODO(GH-24345): Avoid potential double clone
result = np.asarray(self._values, dtype=dtype)
else:
result = self._values
if clone:
result = result.clone()
return result
@property
def _ndarray_values(self):
# type: () -> np.ndarray
"""
The data as an ndarray, possibly losing informatingion.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
- categorical -> codes
"""
if is_extension_array_dtype(self):
return self.array._ndarray_values
return self.values
@property
def empty(self):
return not self.size
def getting_max(self):
"""
Return the getting_maximum value of the Index.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.getting_min : Return the getting_minimum value in an Index.
Collections.getting_max : Return the getting_maximum value in a Collections.
KnowledgeFrame.getting_max : Return the getting_maximum values in a KnowledgeFrame.
Examples
--------
>>> idx = mk.Index([3, 2, 1])
>>> idx.getting_max()
3
>>> idx = mk.Index(['c', 'b', 'a'])
>>> idx.getting_max()
'c'
For a MultiIndex, the getting_maximum is detergetting_mined lexicographictotal_ally.
>>> idx = mk.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.getting_max()
('b', 2)
"""
return nanops.nangetting_max(self.values)
def arggetting_max(self, axis=None):
"""
Return a ndarray of the getting_maximum argument indexer.
See Also
--------
numpy.ndarray.arggetting_max
"""
return nanops.nanarggetting_max(self.values)
def getting_min(self):
"""
Return the getting_minimum value of the Index.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.getting_max : Return the getting_maximum value of the object.
Collections.getting_min : Return the getting_minimum value in a Collections.
KnowledgeFrame.getting_min : Return the getting_minimum values in a KnowledgeFrame.
Examples
--------
>>> idx = mk.Index([3, 2, 1])
>>> idx.getting_min()
1
>>> idx = mk.Index(['c', 'b', 'a'])
>>> idx.getting_min()
'a'
For a MultiIndex, the getting_minimum is detergetting_mined lexicographictotal_ally.
>>> idx = mk.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.getting_min()
('a', 1)
"""
return nanops.nangetting_min(self.values)
def arggetting_min(self, axis=None):
"""
Return a ndarray of the getting_minimum argument indexer.
See Also
--------
numpy.ndarray.arggetting_min
"""
return nanops.nanarggetting_min(self.values)
def convert_list(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a monkey scalar
(for Timestamp/Timedelta/Interval/Period)
See Also
--------
numpy.ndarray.convert_list
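        Examples
        --------
        A minimal illustration with hypothetical data:
        >>> mk.Collections([1, 2, 3]).convert_list()
        [1, 2, 3]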
"""
if is_datetimelike(self._values):
return [com.maybe_box_datetimelike(x) for x in self._values]
elif is_extension_array_dtype(self._values):
return list(self._values)
else:
return self._values.convert_list()
to_list = convert_list
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a monkey scalar
(for Timestamp/Timedelta/Interval/Period)
"""
        # We are explicitly making element iterators.
if is_datetimelike(self._values):
return mapping(com.maybe_box_datetimelike, self._values)
elif is_extension_array_dtype(self._values):
return iter(self._values)
else:
return mapping(self._values.item, range(self._values.size))
@cache_readonly
def hasnans(self):
"""
Return if I have whatever nans; enables various perf speedups.
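        Examples
        --------
        Illustrative only, with hypothetical data:
        >>> mk.Collections([1.0, np.nan]).hasnans
        True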
"""
return bool(ifna(self).whatever())
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
func = gettingattr(self, name, None)
if func is None:
raise TypeError("{klass} cannot perform the operation {op}".formating(
klass=self.__class__.__name__, op=name))
return func(**kwds)
def _mapping_values(self, mappingper, na_action=None):
"""
An internal function that mappings values using the input
correspondence (which can be a dict, Collections, or function).
Parameters
----------
mappingper : function, dict, or Collections
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mappingping function
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mappingping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
# we can fastpath dict/Collections to an efficient mapping
# as we know that we are not going to have to yield
# python types
if incontainstance(mappingper, dict):
if hasattr(mappingper, '__missing__'):
# If a dictionary subclass defines a default value method,
# convert mappingper to a lookup function (GH #15999).
dict_with_default = mappingper
mappingper = lambda x: dict_with_default[x]
else:
# Dictionary does not have a default. Thus it's safe to
# convert to an Collections for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
from monkey import Collections
mappingper = Collections(mappingper)
if incontainstance(mappingper, ABCCollections):
# Since values were input this averages we came from either
# a dict or a collections and mappingper should be an index
if is_extension_type(self.dtype):
values = self._values
else:
values = self.values
indexer = mappingper.index.getting_indexer(values)
new_values = algorithms.take_1d(mappingper._values, indexer)
return new_values
# we must convert to python types
if is_extension_type(self.dtype):
values = self._values
if na_action is not None:
raise NotImplementedError
mapping_f = lambda values, f: values.mapping(f)
else:
values = self.totype(object)
values = gettingattr(values, 'values', values)
if na_action == 'ignore':
def mapping_f(values, f):
return lib.mapping_infer_mask(values, f,
ifna(values).view(np.uint8))
else:
mapping_f = lib.mapping_infer
# mappingper is a function
new_values = mapping_f(values, mappingper)
return new_values
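    # Illustrative sketch of the dict/Collections fast path above (hypothetical
    # data): the lookup is a positional indexer followed by a take, roughly
    #   mappingper = Collections({'cat': 'kitten', 'dog': 'puppy'})
    #   indexer = mappingper.index.getting_indexer(np.array(['dog', 'cat'], dtype=object))
    #   algorithms.take_1d(mappingper._values, indexer)
    # which gives array(['puppy', 'kitten'], dtype=object).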
def counts_value_num(self, normalize=False, sort=True, ascending=False,
bins=None, sipna=True):
"""
Return a Collections containing counts of distinctive values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the distinctive values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for ``mk.cut``, only works with numeric data.
sipna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Collections
See Also
--------
Collections.count: Number of non-NA elements in a Collections.
KnowledgeFrame.count: Number of non-NA elements in a KnowledgeFrame.
Examples
--------
>>> index = mk.Index([3, 1, 2, 3, 4, np.nan])
>>> index.counts_value_num()
3.0 2
4.0 1
2.0 1
1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
divisioniding total_all values by the total_sum of values.
>>> s = mk.Collections([3, 1, 2, 3, 4, np.nan])
>>> s.counts_value_num(normalize=True)
3.0 0.4
4.0 0.2
2.0 0.2
1.0 0.2
dtype: float64
**bins**
Bins can be useful for going from a continuous variable to a
        categorical variable; instead of counting distinctive
        occurrences of values, divisionide the index into the specified
number of half-open bins.
>>> s.counts_value_num(bins=3)
(2.0, 3.0] 2
(0.996, 2.0] 2
(3.0, 4.0] 1
dtype: int64
**sipna**
With `sipna` set to `False` we can also see NaN index values.
>>> s.counts_value_num(sipna=False)
3.0 2
NaN 1
4.0 1
2.0 1
1.0 1
dtype: int64
"""
from monkey.core.algorithms import counts_value_num
result = counts_value_num(self, sort=sort, ascending=ascending,
normalize=normalize, bins=bins, sipna=sipna)
return result
def distinctive(self):
values = self._values
if hasattr(values, 'distinctive'):
result = values.distinctive()
else:
from monkey.core.algorithms import distinctive1d
result = distinctive1d(values)
return result
def ndistinctive(self, sipna=True):
"""
Return number of distinctive elements in the object.
Excludes NA values by default.
Parameters
----------
sipna : boolean, default True
Don't include NaN in the count.
Returns
-------
ndistinctive : int
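        Examples
        --------
        Illustrative only, with hypothetical data:
        >>> mk.Collections([1, 1, 2, np.nan]).ndistinctive()
        2
        >>> mk.Collections([1, 1, 2, np.nan]).ndistinctive(sipna=False)
        3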
"""
uniqs = self.distinctive()
n = length(uniqs)
if sipna and ifna(uniqs).whatever():
n -= 1
return n
@property
def is_distinctive(self):
"""
Return boolean if values in the object are distinctive.
Returns
-------
is_distinctive : boolean
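        Examples
        --------
        Illustrative only, with hypothetical data:
        >>> mk.Collections([1, 2, 3]).is_distinctive
        True
        >>> mk.Collections([1, 1, 2]).is_distinctive
        False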
"""
return self.ndistinctive() == length(self)
@property
def is_monotonic(self):
"""
Return boolean if values in the object are
monotonic_increasing.
.. versionadded:: 0.19.0
Returns
-------
is_monotonic : boolean
"""
from monkey import Index
return Index(self).is_monotonic
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
"""
Return boolean if values in the object are
monotonic_decreasing.
.. versionadded:: 0.19.0
Returns
-------
is_monotonic_decreasing : boolean
"""
from monkey import Index
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
"""
Memory usage of the values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory contotal_sumption
Returns
-------
bytes used
See Also
--------
numpy.ndarray.nbytes
Notes
-----
Memory usage does not include memory contotal_sumed by elements that
are not components of the array if deep=False or if used on PyPy
"""
if hasattr(self.array, 'memory_usage'):
return self.array.memory_usage(deep=deep)
v = self.array.nbytes
if deep and is_object_dtype(self) and not PYPY:
v += lib.memory_usage_of_objects(self.array)
return v
@Substitution(
values='', order='', size_hint='',
sort=textwrap.dedent("""\
sort : boolean, default False
Sort `distinctives` and shuffle `labels` to maintain the
relationship.
"""))
@Appender(algorithms._shared_docs['factorize'])
def factorize(self, sort=False, na_sentinel=-1):
return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
_shared_docs['searchsorted'] = (
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted %(klass)s `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the final_item such index. If there is no suitable
index, return either 0 or N (where N is the lengthgth of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typictotal_ally the result of ``np.argsort``.
Returns
-------
int or array of int
A scalar or array of insertion points with the
same shape as `value`.
.. versionchanged :: 0.24.0
If `value` is a scalar, an int is now always returned.
Previously, scalar inputs returned an 1-item array for
:class:`Collections` and :class:`Categorical`.
See Also
--------
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = mk.Collections([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
3
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
>>> x = mk.Categorical(['apple', 'bread', 'bread',
'cheese', 'milk'], ordered=True)
[apple, bread, bread, cheese, milk]
Categories (4, object): [apple < bread < cheese < milk]
>>> x.searchsorted('bread')
1
>>> x.searchsorted(['bread'], side='right')
array([3])
""")
@Substitution(klass='IndexOpsMixin')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
# needs coercion on the key (DatetimeIndex does already)
return self.values.searchsorted(value, side=side, sorter=sorter)
def sip_duplicates(self, keep='first', inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if incontainstance(self, ABCIndexClass):
if self.is_distinctive:
return self._shtotal_allow_clone()
duplicated_values = self.duplicated_values(keep=keep)
result = self[np.logical_not(duplicated_values)]
if inplace:
return self._umkate_inplace(result)
else:
return result
def duplicated_values(self, keep='first'):
from monkey.core.algorithms import duplicated_values
if incontainstance(self, ABCIndexClass):
if self.is_distinctive:
return np.zeros(length(self), dtype=np.bool)
return duplicated_values(self, keep=keep)
else:
            return self._constructor(duplicated_values(self, keep=keep),
                                     index=self.index).__finalize__(self)
import clone
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from monkey.core.base import MonkeyObject
from monkey.core.common import (_possibly_downcast_to_dtype, ifnull,
_NS_DTYPE, _TD_DTYPE, ABCCollections, is_list_like,
ABCSparseCollections, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalengtht, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
from monkey.core.index import Index, MultiIndex, _ensure_index
from monkey.core.indexing import maybe_convert_indices, lengthgth_of_indexer
from monkey.core.categorical import Categorical, maybe_to_categorical
import monkey.core.common as com
from monkey.sparse.array import _maybe_to_sparse, SparseArray
import monkey.lib as lib
import monkey.tslib as tslib
import monkey.computation.expressions as expressions
from monkey.util.decorators import cache_readonly
from monkey.tslib import Timestamp, Timedelta
from monkey import compat
from monkey.compat import range, mapping, zip, u
from monkey.tcollections.timedeltas import _coerce_scalar_to_timedelta_type
from monkey.lib import BlockPlacement
class Block(MonkeyObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a monkey
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if length(self.mgr_locs) != length(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
length(self.values), length(self.mgr_locs)))
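    # Illustrative sketch (hypothetical values): a 2-d float block backing two
    # columns of a 3-row frame could be built roughly as
    #   vals = np.arange(6.).reshape(2, 3)          # shape = (n_items, n_rows)
    #   blk = make_block(vals, placement=[0, 1], ndim=2)
    # after which blk.shape == (2, 3) and blk.mgr_locs wraps the placement in a
    # BlockPlacement.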
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_totype(self, dtype):
"""
validate that we have a totypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a mk.Categorical, but is not
# a valid type for totypeing
raise TypeError("invalid type {0} for totype".formating(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, clone=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if clone:
values = values.clone()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not incontainstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out total_all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, length(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __length__(self):
return length(self.values)
def __gettingstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.getting_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def gettingitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __gettingitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if incontainstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is total_allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def unioner(self, other):
return _unioner_blocks([self, other])
def reindexing_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer informatingion
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def getting(self, item):
loc = self.items.getting_loc(item)
return self.values[loc]
def igetting(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def employ(self, func, **kwargs):
""" employ the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not incontainstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillnone(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.clone()]
mask = ifnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillnone' "
"is currently limited to 2")
mask[mask.cumtotal_sum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
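    # Illustrative sketch of the ``limit`` handling above for a 1-d block
    # (hypothetical values): only the first ``limit`` missing positions stay
    # masked, e.g. with limit=2
    #   mask = np.array([True, True, True, False])
    #   mask[mask.cumtotal_sum(0) > 2] = False      # -> [True, True, False, False]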
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast total_all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or incontainstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.getting(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.adding(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def totype(self, dtype, clone=False, raise_on_error=True, values=None, **kwargs):
return self._totype(dtype, clone=clone, raise_on_error=raise_on_error,
values=values, **kwargs)
def _totype(self, dtype, clone=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if clone=True, return a new clone)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only ctotal_alled for non-categoricals
if self.is_categorical_totype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# totype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if clone:
return self.clone()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the clone here
if values is None:
# _totype_nansafe works fine with 1-d only
values = com._totype_nansafe(self.values.flat_underlying(), dtype, clone=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.clone() if clone else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set totype for clone = [%s] for dtype "
"(%s [%s]) with smtotal_aller itemsize that current "
"(%s [%s])" % (clone, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, clone=True, **kwargs):
""" attempt to coerce whatever object types to better types
return a clone of the block (if clone = True)
by definition we are not an ObjectBlock here! """
return [self.clone()] if clone else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
we may have value_roundtripped thru object in the average-time """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if incontainstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not incontainstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if ifnull(result).total_all():
return result.totype(np.bool_)
else:
result = result.totype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.totype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = ifnull(values)
if not self.is_object and not quoting:
values = values.totype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def clone(self, deep=True):
values = self.values
if deep:
values = values.clone()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replacing(self, to_replacing, value, inplace=False, filter=None,
regex=False):
""" replacing the to_replacing value with value, possible to create new
blocks here this is just a ctotal_all to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replacing)
if filter is not None:
filtered_out = ~self.mgr_locs.incontain(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.whatever():
if inplace:
return [self]
return [self.clone()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.totype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = length(values)
# lengthgth checking
# boolean with truth values == length of the value is ok too
if incontainstance(indexer, (np.ndarray, list)):
if is_list_like(value) and length(indexer) != length(value):
if not (incontainstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
length(indexer[indexer]) == length(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different lengthgth than the value")
# slice
elif incontainstance(indexer, slice):
if is_list_like(value) and l:
if length(value) != lengthgth_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different lengthgth than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are total_all scalar indexers
if arr_value.ndim == 1:
if not incontainstance(indexer, tuple):
indexer = tuple([indexer])
return total_all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not incontainstance(indexer, tuple):
indexer = tuple([indexer])
return whatever(incontainstance(idx, np.ndarray) and length(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif length(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.totype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
return block
except (ValueError, TypeError) as definal_item_tail:
raise
except Exception as definal_item_tail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.clone()
# may need to align the new
if hasattr(new, 'reindexing_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindexing_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and ifnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if incontainstance(new, np.ndarray) and new.ndim == self.ndim - 1:
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.whatever():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.whatever():
n = new[i] if incontainstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
                        # we need to explicitly totype here to make a clone
n = n.totype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.clone()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.adding(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.adding(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
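    # Illustrative sketch (hypothetical values): ``np.putmask`` is the core of
    # the fast path above,
    #   a = np.array([1., 2., 3.])
    #   np.putmask(a, np.array([True, False, True]), 9.0)
    # leaves ``a`` as array([9., 2., 9.]).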
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.clone()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".formating(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillnone but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.clone()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.clone()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.clone()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.totype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".formating(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in employ_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.employ_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.getting_values(), indexer, axis=axis,
total_allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.getting_values(), indexer, axis=axis,
total_allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def getting_values(self, dtype=None):
return self.values
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shifting(self, periods, axis=0):
""" shifting the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
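        # Roughly: np.roll the (possibly upcast) values along `axis`, then
        # overwrite the slots that wrapped around with fill_value.  For
        # example, shifting [1, 2, 3] by periods=1 gives [fill_value, 1, 2],
        # and periods=-1 gives [2, 3, fill_value] (illustrative only).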
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
        raise_on_error : if True (the default), raise when the function
            cannot be applied; otherwise just return the data that we had
            coming in
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindexing_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
                    # this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# getting the result, may need to transpose the other
def getting_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(definal_item_tail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# getting the result
try:
result = getting_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of total_allowing to pass through
except ValueError as definal_item_tail:
raise
except Exception as definal_item_tail:
result = handle_error()
# technictotal_ally a broadcast error in numpy can 'work' by returning a
# boolean False
        if not incontainstance(result, np.ndarray):
            # differentiate between an invalid ndarray-ndarray comparison
            # and an invalid type comparison
            if incontainstance(values, np.ndarray) and is_list_like(other):
                raise ValueError('Invalid broadcasting comparison [%s] '
                                 'with block values' % repr(other))
            raise TypeError('Could not compare [%s] with block values'
                            % repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
        raise_on_error : if True (the default), raise when the function
            cannot be applied; otherwise just return the data that we had
            coming in
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindexing_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
                # if it is symmetric we are ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).total_all():
pass
                # pseudo broadcast (it's a 2d vs 1d situation, say, and where
                # needs it in a specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindexing_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.flat_underlying().total_all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as definal_item_tail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(definal_item_tail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not incontainstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].total_all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.whatever():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.adding(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
        if self.dtype != other.dtype or self.shape != other.shape:
            return False
return array_equivalengtht(self.values, other.values)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement,
ndim=None, fastpath=False,):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a lengthgth.
self.mgr_locs = placement
# kludgettingastic
if ndim is None:
if length(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not incontainstance(values, self._holder):
raise TypeError("values must be {0}".formating(self._holder.__name__))
self.values = values
def getting_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def igetting(self, col):
if self.ndim == 2 and incontainstance(col, tuple):
col, loc = col
if col != 0:
raise IndexError("{0} only contains one item".formating(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".formating(self))
return self.values
def should_store(self, value):
return incontainstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.convert_list() == [0]
self.values = values
def getting(self, item):
if self.ndim == 1:
loc = self.items.getting_loc(item)
return self.values[loc]
else:
return self.values
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.getting_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
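# Concrete Block subclasses follow.  make_block() (defined further below)
# picks the klass from the values' dtype: FloatBlock / ComplexBlock /
# IntBlock / BoolBlock for numeric data, TimeDeltaBlock and DatetimeBlock
# for the m8[ns]/M8[ns] dtypes, CategoricalBlock and SparseBlock for the
# non-consolidatable holders, and ObjectBlock as the fallback.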
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
        if self.dtype != other.dtype or self.shape != other.shape:
            return False
left, right = self.values, other.values
return ((left == right) | (np.ifnan(left) & np.ifnan(right))).total_all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return incontainstance(element, (float, int, np.float_, np.int_)) and not incontainstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_formating=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = ifnull(values)
formatingter = None
if float_formating and decimal != '.':
formatingter = lambda v : (float_formating % v).replacing('.',decimal,1)
elif decimal != '.':
formatingter = lambda v : ('%g' % v).replacing('.',decimal,1)
elif float_formating:
formatingter = lambda v : float_formating % v
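        # e.g. with float_formating='%.2f' and decimal=',' the formatters
        # above render 1.5 as '1,50' (illustrative; the actual options come
        # from the to_csv / formatting caller).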
if formatingter is None and not quoting:
values = values.totype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
if formatingter:
imask = (~mask).flat_underlying()
values.flat[imask] = np.array(
[formatingter(val) for val in values.flat_underlying()[imask]])
return values
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
        return (incontainstance(element, (float, int, complex, np.float_, np.int_)) and
                not incontainstance(element, (bool, np.bool_)))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if incontainstance(value, type(tslib.NaT)) or np.array(ifnull(value)).total_all():
value = tslib.iNaT
elif incontainstance(value, Timedelta):
value = value.value
elif incontainstance(value, np.timedelta64):
pass
elif com.is_integer(value):
# coerce to seconds of timedelta
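            # e.g. an integer fill value of 5 is interpreted as 5 seconds and
            # stored as 5 * 1e9 nanoseconds (NaT-likes were already mapped to
            # iNaT above)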
value = np.timedelta64(int(value * 1e9))
elif incontainstance(value, timedelta):
value = np.timedelta64(value)
return value
def _try_coerce_args(self, values, other):
""" Coerce values and other to float64, with null values converted to
NaN. values is always ndarray-like, other may not be """
def masker(v):
mask = ifnull(v)
v = v.totype('float64')
v[mask] = np.nan
return v
values = masker(values)
if is_null_datelike_scalar(other):
other = np.nan
elif incontainstance(other, (np.timedelta64, Timedelta, timedelta)):
other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
if other == tslib.iNaT:
other = np.nan
elif lib.isscalar(other):
other = np.float64(other)
else:
other = masker(other)
return values, other
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if incontainstance(result, np.ndarray):
mask = ifnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.totype('m8[ns]')
result[mask] = tslib.iNaT
elif incontainstance(result, np.integer):
result = lib.Timedelta(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = ifnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).flat_underlying()
#### FIXME ####
# should use the core.formating.Timedelta64Formatter here
# to figure what formating to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(formating='total_all')
for val in values.flat_underlying()[imask]],
dtype=object)
return rvalues
def getting_values(self, dtype=None):
# return object dtypes as Timedelta
if dtype == object:
return lib.mapping_infer(self.values.flat_underlying(), lib.Timedelta
).reshape(self.values.shape)
return self.values
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return incontainstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replacing(self, to_replacing, value, inplace=False, filter=None,
regex=False):
to_replacing_values = np.atleast_1d(to_replacing)
if not np.can_cast(to_replacing_values, bool):
return self
return super(BoolBlock, self).replacing(to_replacing, value,
inplace=inplace, filter=filter,
regex=regex)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
placement=None):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.flat_underlying())
def convert(self, datetime=True, numeric=True, timedelta=True, coerce=False,
clone=True, by_item=True):
""" attempt to coerce whatever object types to better types
return a clone of the block (if clone = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
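        # For example, an object column holding Python ints can come back as
        # an int64 block, floats as a float64 block, and datetime.datetime
        # values as an M8[ns] block (illustrative; the conversion itself is
        # done by com._possibly_convert_objects).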
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.igetting(i)
values = com._possibly_convert_objects(
values.flat_underlying(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
clone=clone
).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values,
ndim=self.ndim, placement=[rl])
blocks.adding(newb)
else:
values = com._possibly_convert_objects(
self.values.flat_underlying(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
clone=clone
).reshape(self.values.shape)
blocks.adding(make_block(values,
ndim=self.ndim, placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).total_all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = length(self.items)
self.values = np.empty(tuple(new_shape),dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
result_blocks = []
for blk in blocks:
result_blocks.extend(blk.convert(datetime=True,
numeric=False))
return result_blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
def replacing(self, to_replacing, value, inplace=False, filter=None,
regex=False):
blk = [self]
to_rep_is_list = com.is_list_like(to_replacing)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
if not either_list and com.is_re(to_replacing):
blk[0], = blk[0]._replacing_single(to_replacing, value,
inplace=inplace, filter=filter,
regex=True)
elif not (either_list or regex):
blk = super(ObjectBlock, self).replacing(to_replacing, value,
inplace=inplace,
filter=filter, regex=regex)
elif both_lists:
for to_rep, v in zip(to_replacing, value):
blk[0], = blk[0]._replacing_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex)
elif to_rep_is_list and regex:
for to_rep in to_replacing:
blk[0], = blk[0]._replacing_single(to_rep, value,
inplace=inplace,
filter=filter, regex=regex)
else:
blk[0], = blk[0]._replacing_single(to_replacing, value,
inplace=inplace, filter=filter,
regex=regex)
return blk
def _replacing_single(self, to_replacing, value, inplace=False, filter=None,
regex=False):
# to_replacing is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replacing)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replacing and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replacing = regex
regex = regex_re or to_rep_re
# try to getting the pattern attribute (compiled re) or it's a string
try:
pattern = to_replacing.pattern
except AttributeError:
pattern = to_replacing
# if the pattern is not empty and to_replacing is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replacing)
else:
# if the thing to replacing is not a string or compiled regex ctotal_all
# the superclass method -> to_replacing is some kind of object
result = super(ObjectBlock, self).replacing(to_replacing, value,
inplace=inplace,
filter=filter,
regex=regex)
if not incontainstance(result, list):
result = [result]
return result
new_values = self.values if inplace else self.values.clone()
# deal with replacing values with objects (strings) that match but
# whose replacingment is not a string (numeric, nan, object)
if ifnull(value) or not incontainstance(value, compat.string_types):
def re_replacingr(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
            # value is guaranteed to be a string here; s can be either a
            # string or null, and if it's null it gettings returned unchanged
def re_replacingr(s):
try:
return rx.sub(value, s)
except TypeError:
return s
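        # np.vectorize applies re_replacingr elementwise; e.g. with
        # to_replacing=r'^foo', value='bar' and regex=True, 'foobaz' becomes
        # 'barbaz', while non-matching (or non-string) cells are left
        # untouched (illustrative).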
f = np.vectorize(re_replacingr, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.incontain(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
return [self if inplace else
make_block(new_values,
fastpath=True, placement=self.mgr_locs)]
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement,
fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True, placement=placement,
**kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, clone=True, **kwargs):
return [self.clone() if clone else self]
@property
def shape(self):
return (length(self.mgr_locs), length(self.values))
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def fillnone(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillnone' has "
"not been implemented yet")
values = self.values if inplace else self.values.clone()
return [self.make_block_same_class(values=values.fillnone(value=value,
limit=limit),
placement=self.mgr_locs)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = self.values if inplace else self.values.clone()
return self.make_block_same_class(values=values.fillnone(fill_value=fill_value,
method=method,
limit=limit),
placement=self.mgr_locs)
def shifting(self, periods, axis=0):
return self.make_block_same_class(values=self.values.shifting(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
        # axis doesn't matter; we are retotal_ally a single-dim object
        # but are passed the axis depending on the ctotal_alling routine
        # if it's REALLY axis 0, then this will be a reindexing and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.clone()
new_values[mask] = new
return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
def _totype(self, dtype, clone=False, raise_on_error=True, values=None,
klass=None):
"""
Coerce to the new type (if clone=True, return a new clone)
raise on an except if raise == True
"""
if self.is_categorical_totype(dtype):
values = self.values
else:
values = np.asarray(self.values).totype(dtype, clone=False)
if clone:
values = values.clone()
return make_block(values,
ndim=self.ndim,
placement=self.mgr_locs)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = ifnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1,length(values))
class DatetimeBlock(Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement,
fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values,
fastpath=True, placement=placement,
**kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or
incontainstance(element, datetime) or
ifnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_args(self, values, other):
""" Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smtotal_allest i8, and will correctly value_round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be """
values = values.view('i8')
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif incontainstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
other = np.array(other, dtype='i8')
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if incontainstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.totype('M8[ns]')
elif incontainstance(result, (np.integer, np.datetime64)):
result = lib.Timestamp(result)
return result
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if incontainstance(value, type(tslib.NaT)) or np.array(ifnull(value)).total_all():
value = tslib.iNaT
return value
def fillnone(self, value, limit=None,
inplace=False, downcast=None):
# straight putmask here
values = self.values if inplace else self.values.clone()
mask = ifnull(self.values)
value = self._try_fill(value)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillnone' "
"is currently limited to 2")
            mask[mask.cumtotal_sum(self.ndim - 1) > limit] = False
np.putmask(values, mask, value)
return [self if inplace else
make_block(values,
fastpath=True, placement=self.mgr_locs)]
def to_native_types(self, slicer=None, na_rep=None, date_formating=None,
quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
from monkey.core.formating import _getting_formating_datetime64_from_values
formating = _getting_formating_datetime64_from_values(values, date_formating)
result = tslib.formating_array_from_datetime(values.view('i8').flat_underlying(),
tz=None,
formating=formating,
na_rep=na_rep).reshape(values.shape)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workavalue_round for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
def getting_values(self, dtype=None):
# return object dtype as Timestamps
if dtype == object:
return lib.mapping_infer(self.values.flat_underlying(), lib.Timestamp)\
.reshape(self.values.shape)
return self.values
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (length(self.mgr_locs), self.sp_index.lengthgth)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
#return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
clone=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __length__(self):
try:
return self.sp_index.lengthgth
except:
return 0
def clone(self, deep=True):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, clone=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement,
sparse_index=None, kind=None, dtype=None,
fill_value=None, clone=False, fastpath=True):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None:
fill_value = self.values.fill_value
# if not incontainstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return make_block(np.empty(values.shape, dtype=dtype),
placement, fastpath=True,)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, clone=clone)
return make_block(new_values, ndim=self.ndim,
fastpath=fastpath, placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = com.interpolate_2d(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillnone(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillnone' has "
"not been implemented yet")
if issubclass(self.dtype.type, np.floating):
value = float(value)
values = self.values if inplace else self.values.clone()
return [self.make_block_same_class(values=values.getting_values(value),
fill_value=value,
placement=self.mgr_locs)]
def shifting(self, periods, axis=0):
""" shifting the block by periods """
N = length(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
def reindexing_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer informatingion
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindexing(self, new_index):
""" sparse reindexing and return a new block
current reindexing only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindexing(
values.sp_values.totype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None,
dtype=None, fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if incontainstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
klass = DatetimeBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement)
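# A rough sketch of the dispatch above (illustrative only):
#
#   make_block(np.array([1., 2.]), placement=[0])                  -> FloatBlock
#   make_block(np.array([1, 2]), placement=[0])                    -> IntBlock
#   make_block(np.array(['a', 'b'], dtype=object), placement=[0])  -> ObjectBlock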
# TODO: flexible with index=None and/or items=None
class BlockManager(MonkeyObject):
"""
Core internal data structure to implement KnowledgeFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentitotal_ally it's a
lightweight blocked set of labeled data to be manipulated by the KnowledgeFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
clone(deep=True)
getting_dtype_counts
getting_ftype_counts
getting_dtypes
getting_ftypes
employ(func, axes, block_filter_fn)
getting_bool_data
getting_numeric_data
getting_slice(slice_like, axis)
getting(label)
igetting(loc)
getting_scalar(label_tup)
take(indexer, axis)
reindexing_axis(new_labels, axis)
reindexing_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if length(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple items")
else:
if self.ndim != block.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of length 0 """
if axes is None:
axes = [_ensure_index([])] + [
_ensure_index(a) for a in self.axes[1:]
]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(length(ax) for ax in self.axes)
@property
def ndim(self):
return length(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_length = length(self.axes[axis])
new_length = length(new_labels)
if new_length != old_length:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' % (old_length, new_length))
self.axes[axis] = new_labels
def renagetting_ming_axis(self, mappingper, axis, clone=True):
"""
Rename one of axes.
Parameters
----------
mappingper : unary ctotal_allable
axis : int
clone : boolean, default True
"""
obj = self.clone(deep=clone)
obj.set_axis(axis, _transform_index(self.axes[axis], mappingper))
return obj
def add_prefix(self, prefix):
f = (str(prefix) + '%s').__mod__
return self.renagetting_ming_axis(f, axis=0)
def add_suffix(self, suffix):
f = ('%s' + str(suffix)).__mod__
return self.renagetting_ming_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if length(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, length(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Umkate mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(length(rl))
if (new_blknos == -1).whatever():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
# make items read only for now
def _getting_items(self):
return self.axes[0]
items = property(fgetting=_getting_items)
def _getting_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.getting(v, 0) + b.shape[0]
return counts
def getting_dtype_counts(self):
return self._getting_counts(lambda b: b.dtype.name)
def getting_ftype_counts(self):
return self._getting_counts(lambda b: b.ftype)
def getting_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return com.take_1d(dtypes, self._blknos, total_allow_fill=False)
def getting_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return com.take_1d(ftypes, self._blknos, total_allow_fill=False)
def __gettingstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values,
mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.totype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (incontainstance(state, tuple) and length(state) >= 4
and '0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(
unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard whateverthing after 3rd, support beta pickling formating for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if length(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workavalue_round for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-distinctive
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the astotal_sumption that
# block items corresponded to manager items 1-to-1.
total_all_mgr_locs = [slice(0, length(bitems[0]))]
else:
total_all_mgr_locs = [self.axes[0].getting_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, total_all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __length__(self):
return length(self.items)
def __unicode__(self):
output = com.pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % com.pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = total_sum(length(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if not block.is_sparse and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if length(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.formating(length(self.items),
tot_items))
def employ(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the ctotal_allable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only ctotal_all the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager integrity check
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replacing-* family of methods
if filter is not None:
filter_locs = set(self.items.getting_indexer_for(filter))
if length(filter_locs) == length(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if f == 'where' and kwargs.getting('align', True):
align_clone = True
align_keys = ['other', 'cond']
elif f == 'putmask' and kwargs.getting('align', True):
align_clone = False
align_keys = ['new', 'mask']
elif f == 'eval':
align_clone = False
align_keys = ['other']
elif f == 'fillnone':
# fillnone interntotal_ally does putmask, maybe it's better to do this
# at mgr, not block level?
align_clone = False
align_keys = ['value']
else:
align_keys = []
aligned_args = dict((k, kwargs[k]) for k in align_keys
if hasattr(kwargs[k], 'reindexing_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.incontain(filter_locs).whatever():
result_blocks.adding(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = gettingattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindexing_axis(b_items, axis=axis,
clone=align_clone)
applied = gettingattr(b, f)(**kwargs)
if incontainstance(applied, list):
result_blocks.extend(applied)
else:
result_blocks.adding(applied)
if length(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
def ifnull(self, **kwargs):
return self.employ('employ', **kwargs)
def where(self, **kwargs):
return self.employ('where', **kwargs)
def eval(self, **kwargs):
return self.employ('eval', **kwargs)
def setitem(self, **kwargs):
return self.employ('setitem', **kwargs)
def putmask(self, **kwargs):
return self.employ('putmask', **kwargs)
def diff(self, **kwargs):
return self.employ('diff', **kwargs)
def interpolate(self, **kwargs):
return self.employ('interpolate', **kwargs)
def shifting(self, **kwargs):
return self.employ('shifting', **kwargs)
def fillnone(self, **kwargs):
return self.employ('fillnone', **kwargs)
def downcast(self, **kwargs):
return self.employ('downcast', **kwargs)
def totype(self, dtype, **kwargs):
return self.employ('totype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.employ('convert', **kwargs)
def replacing(self, **kwargs):
return self.employ('replacing', **kwargs)
def replacing_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replacing """
# figure out our mask a-priori to avoid repeated replacingments
values = self.as_matrix()
def comp(s):
if ifnull(s):
return ifnull(values)
return _possibly_compare(values, gettingattr(s, 'asm8', s),
operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
for blk in self.blocks:
# its possible to getting multiple result blocks here
# replacing ALWAYS will return a list
rb = [blk if inplace else blk.clone()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
result = b.replacing(s, d, inplace=inplace,
regex=regex)
if incontainstance(result, list):
new_rb.extend(result)
else:
new_rb.adding(result)
else:
# getting our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.whatever():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.adding(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.employ('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
        Return True if the blocks have been consolidated, i.e. no two
        blocks share the same ftype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = length(ftypes) == length(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to getting checked upstairs
self._consolidate_inplace()
return length(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to getting checked upstairs
self._consolidate_inplace()
return total_all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to getting checked upstairs
self._consolidate_inplace()
return whatever([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if length(self.blocks) == 1:
return self.blocks[0].is_view
# It is technictotal_ally possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def getting_bool_data(self, clone=False):
"""
Parameters
----------
clone : boolean, default False
Whether to clone the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], clone)
def getting_numeric_data(self, clone=False):
"""
Parameters
----------
clone : boolean, default False
Whether to clone the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], clone)
def combine(self, blocks, clone=True):
""" return a new manager with the blocks """
if length(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatingenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.getting_reverse_indexer(indexer, self.shape[0])
new_items = self.items.take(indexer)
new_blocks = []
for b in blocks:
b = b.clone(deep=clone)
b.mgr_locs = com.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0,
total_allow_fill=False)
new_blocks.adding(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(new_blocks, new_axes, do_integrity_check=False)
def getting_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.gettingitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return length(self.blocks)
def clone(self, deep=True):
"""
Make deep or shtotal_allow clone of BlockManager
Parameters
----------
        deep : boolean or string, default True
If False, return shtotal_allow clone (do not clone data)
If 'total_all', clone data and a deep clone of the index
Returns
-------
clone : BlockManager
"""
# this preserves the notion of view cloneing of axes
if deep:
if deep == 'total_all':
clone = lambda ax: ax.clone(deep=True)
else:
clone = lambda ax: ax.view()
new_axes = [ clone(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.employ('clone', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if length(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindexing_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].getting_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workavalue_round for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent ctotal_all final_item):
# File "<standardin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.getting_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.total_all():
raise AssertionError('Some items were not contained in blocks')
return result
def xs(self, key, axis=1, clone=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d'
% axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].getting_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if incontainstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if length(self.blocks) > 1:
# we must clone here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.adding(newb)
elif length(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if clone:
vals = vals.clone()
new_blocks = [make_block(values=vals, placement=block.mgr_locs,
klass=block.__class__, fastpath=True,)]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
getting a cross sectional for a given location in the
items ; handle dups
        return the result, which *could* be a view in the case of a
single block
"""
if length(self.blocks) == 1:
return self.blocks[0].values[:, loc]
items = self.items
# non-distinctive (GH4726)
if not items.is_distinctive:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# distinctive
dtype = _interleaved_dtype(self.blocks)
n = length(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such total_allocatement may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.igetting((i, loc)))
return result
def consolidate(self):
"""
Join togettingher blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def getting(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_distinctive:
if not ifnull(item):
loc = self.items.getting_loc(item)
else:
indexer = np.arange(length(self.items))[ifnull(self.items)]
# total_allow a single nan location indexer
if not np.isscalar(indexer):
if length(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.igetting(loc, fastpath=fastpath)
else:
if ifnull(item):
raise ValueError("cannot label index with a null key")
indexer = self.items.getting_indexer_for([item])
return self.reindexing_indexer(new_axis=self.items[indexer],
indexer=indexer, axis=0, total_allow_dups=True)
def igetting(self, i, fastpath=True):
"""
Return the data as a SingleBlockManager if fastpath=True and possible
Otherwise return as a ndarray
"""
block = self.blocks[self._blknos[i]]
values = block.igetting(self._blklocs[i])
if not fastpath or block.is_sparse or values.ndim != 1:
return values
# fastpath shortcut for select a single-dim from a 2-dim BM
return SingleBlockManager([ block.make_block_same_class(values,
placement=slice(0, length(values)),
ndim=1,
fastpath=True) ],
self.axes[1])
def getting_scalar(self, tup):
"""
Retrieve single item
"""
full_loc = list(ax.getting_loc(x)
for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
full_loc[0] = self._blklocs[full_loc[0]]
# FIXME: this may return non-upcasted types?
return blk.values[tuple(full_loc)]
def delete(self, item):
"""
Delete selected item (items if non-distinctive) in-place.
"""
indexer = self.items.getting_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumtotal_sum()
is_blk_deleted = [False] * length(self.blocks)
if incontainstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smtotal_allints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if length(blk_del) == length(bml):
is_blk_deleted[blkno] = True
continue
elif length(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
if not is_blk_deleted[blkno])
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like total_allocatement
# can prob also fix the various if tests for sparse/categorical
value_is_sparse = incontainstance(value, SparseArray)
value_is_cat = is_categorical(value)
value_is_nonconsolidatable = value_is_sparse or value_is_cat
if value_is_sparse:
# sparse
assert self.ndim == 2
def value_gettingitem(placement):
return value
elif value_is_cat:
# categorical
def value_gettingitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
def value_gettingitem(placement):
return value
else:
def value_gettingitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
try:
loc = self.items.getting_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(length(self.items), item, value)
return
if incontainstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].clone()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _getting_blkno_placements(blknos, length(self.blocks),
group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_gettingitem(val_locs), check=check)
else:
unfit_mgr_locs.adding(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.adding(val_locs)
# If total_all block items are unfit, schedule the block for removal.
if length(val_locs) == length(blk.mgr_locs):
removed_blknos.adding(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(length(blk))
if length(removed_blknos):
# Remove blocks & umkate blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks -
length(removed_blknos))
self._blknos = com.take_1d(new_blknos, self._blknos, axis=0,
total_allow_fill=False)
self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
if i not in set(removed_blknos))
if unfit_val_locs:
unfit_mgr_locs = np.concatingenate(unfit_mgr_locs)
unfit_count = length(unfit_mgr_locs)
new_blocks = []
if value_is_nonconsolidatable:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(values=value.clone(), ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1))
for mgr_loc in unfit_mgr_locs)
self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
length(self.blocks))
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].adding(unfit_val_locs[1:])
new_blocks.adding(
make_block(values=value_gettingitem(unfit_val_items),
ndim=self.ndim, placement=unfit_mgr_locs))
self._blknos[unfit_mgr_locs] = length(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc, item, value, total_allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
total_allow_duplicates: bool
If False, trying to insert non-distinctive item will raise
"""
if not total_allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError('cannot insert %s, already exists' % item)
if not incontainstance(loc, int):
raise TypeError("loc must be int")
block = make_block(values=value,
ndim=self.ndim,
placement=slice(loc, loc+1))
for blkno, count in _fast_count_smtotal_allints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == length(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.clone()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.adding is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.adding(self._blklocs, 0)
self._blknos = np.adding(self._blknos, length(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, length(self.blocks))
self.axes[0] = self.items.insert(loc, item)
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if length(self.blocks) > 100:
self._consolidate_inplace()
def reindexing_axis(self, new_index, axis, method=None, limit=None,
fill_value=None, clone=True):
"""
Conform block manager to new index.
"""
new_index = _ensure_index(new_index)
new_index, indexer = self.axes[axis].reindexing(
new_index, method=method, limit=limit)
return self.reindexing_indexer(new_index, indexer, axis=axis,
fill_value=fill_value, clone=clone)
def reindexing_indexer(self, new_axis, indexer, axis, fill_value=None,
total_allow_dups=False, clone=True):
"""
Parameters
----------
        new_axis : Index
        indexer : ndarray of int64 or None
            monkey-indexer with -1's only.
        axis : int
        fill_value : object
        total_allow_dups : bool
"""
if indexer is None:
if new_axis is self.axes[axis] and not clone:
return self
result = self.clone(deep=clone)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
        # some axes don't allow reindexing with dups
if not total_allow_dups:
self.axes[axis]._can_reindexing(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(
indexer, fill_tuple=(fill_value,))
else:
new_blocks = [blk.take_nd(indexer, axis=axis,
fill_tuple=(fill_value if fill_value is not None else
blk.fill_value,))
for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
total_allow_fill = fill_tuple is not None
sl_type, slobj, sllength = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], total_allow_fill=total_allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.gettingitem_block(slobj,
new_mgr_locs=slice(0, sllength))]
elif not total_allow_fill or self.ndim == 1:
if total_allow_fill and fill_tuple[0] is None:
_, fill_value = com._maybe_promote(blk.dtype)
fill_tuple = (fill_value,)
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllength),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = com.take_1d(self._blknos, slobj, fill_value=-1,
total_allow_fill=total_allow_fill)
blklocs = com.take_1d(self._blklocs, slobj, fill_value=-1,
total_allow_fill=total_allow_fill)
        # When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly length(blocks).
#
# FIXME: mgr_grouper_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in _getting_blkno_placements(blknos, length(self.blocks),
group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.adding(self._make_na_block(
placement=mgr_locs, fill_value=fill_value))
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's only one item
# and each mgr loc is a clone of that single item.
for mgr_loc in mgr_locs:
newblk = blk.clone(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.adding(newblk)
else:
blocks.adding(blk.take_nd(
blklocs[mgr_locs.indexer], axis=0,
new_mgr_locs=mgr_locs, fill_tuple=None))
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = length(placement)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
        Take items along any axis.
"""
self._consolidate_inplace()
indexer = np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64') if incontainstance(indexer, slice) \
else np.aswhateverarray(indexer, dtype='int64')
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).whatever():
raise Exception('Indices must be nonzero and less than '
                                'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindexing_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, total_allow_dups=True)
def unioner(self, other, lsuffix='', rsuffix=''):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to unioner managers')
l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
right=other.items, rsuffix=rsuffix)
new_items = _concating_indexes([l, r])
new_blocks = [blk.clone(deep=False)
for blk in self.blocks]
offset = self.shape[0]
for blk in other.blocks:
blk = blk.clone(deep=False)
blk.mgr_locs = blk.mgr_locs.add(offset)
new_blocks.adding(blk)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(_consolidate(new_blocks), new_axes)
def _is_indexed_like(self, other):
"""
        Check all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError(('Number of dimensions must agree '
'got %d and %d') % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if length(self_axes) != length(other_axes):
return False
if not total_all (ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if length(self.blocks) != length(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.convert_list())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return total_all(block.equals(oblock) for block, oblock in
zip(self_blocks, other_blocks))
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
if incontainstance(axis, list):
if length(axis) != 1:
raise ValueError(
"cannot create SingleBlockManager with more than 1 axis")
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if incontainstance(block, list):
# empty block
if length(block) == 0:
block = [np.array([])]
elif length(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
else:
self.axes = [_ensure_index(axis)]
# create the block here
if incontainstance(block, list):
# provide consolidation to the interleaved_dtype
if length(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.totype(dtype) for b in block]
block = _consolidate(block)
if length(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
if not incontainstance(block, Block):
block = make_block(block,
placement=slice(0, length(axis)),
ndim=1, fastpath=True)
self.blocks = [block]
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
def reindexing(self, new_axis, indexer=None, method=None, fill_value=None,
limit=None, clone=True):
# if we are the same and don't clone, just return
if self.index.equals(new_axis):
if clone:
return self.clone(deep=True)
else:
return self
values = self._block.getting_values()
if indexer is None:
indexer = self.items.getting_indexer_for(new_axis)
if fill_value is None:
# FIXME: is fill_value used correctly in sparse blocks?
if not self._block.is_sparse:
fill_value = self._block.fill_value
else:
fill_value = np.nan
new_values = com.take_1d(values, indexer,
fill_value=fill_value)
# fill if needed
if method is not None or limit is not None:
new_values = com.interpolate_2d(new_values, method=method,
limit=limit, fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
block = make_block(new_values, clone=clone,
placement=slice(0, length(new_axis)))
mgr = SingleBlockManager(block, new_axis)
mgr._consolidate_inplace()
return mgr
def getting_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(self._block._slice(slobj),
self.index[slobj], fastpath=True)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
kwargs['by_item'] = False
return self.employ('convert', **kwargs)
@property
def dtype(self):
return self._values.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def getting_dtype_counts(self):
return {self.dtype.name: 1}
def getting_ftype_counts(self):
return {self.ftype: 1}
def getting_dtypes(self):
return np.array([self._block.dtype])
def getting_ftypes(self):
return np.array([self._block.ftype])
@property
def values(self):
return self._values.view()
def getting_values(self):
""" return a dense type view """
        return np.array(self._block.to_dense(), clone=False)
@property
def itemsize(self):
return self._values.itemsize
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.getting_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
        fast path for getting a cross-section
return a view of the data
"""
return self._block.values[loc]
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(mapping(int, [tot_items] + list(block_shape)))
implied = tuple(mapping(int, [length(ax) for ax in axes]))
if passed == implied and e is not None:
raise e
raise ValueError("Shape of passed values is {0}, indices imply {1}".formating(
        passed, implied))
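# Illustrative example (added comment, hypothetical values): construction_error(3,
# (4,), axes) with axes of lengths (2, 4) raises
# ValueError("Shape of passed values is (3, 4), indices imply (2, 4)");
# if `e` is given and the two shapes happen to agree, the original exception
# is re-raised instead.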
def create_block_manager_from_blocks(blocks, axes):
try:
if length(blocks) == 1 and not incontainstance(blocks[0], Block):
            # if blocks[0] is of length 0, return empty blocks
if not length(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement is
                # basically "all items", but if there are many, don't bother
                # converting, it's an error anyway.
blocks = [make_block(values=blocks[0],
placement=slice(0, length(axes[0])))]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
blocks = [gettingattr(b, 'values', b) for b in blocks]
tot_items = total_sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
construction_error(length(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
sparse_items = []
datetime_items = []
cat_items = []
extra_locs = []
names_idx = Index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(length(names_idx))
else:
assert names_idx.interst(axes[0]).is_distinctive
names_indexer = names_idx.getting_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.adding(i)
continue
k = names[name_idx]
v = arrays[name_idx]
if incontainstance(v, (SparseArray, ABCSparseCollections)):
sparse_items.adding((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.adding((i, k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.adding((i, k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if hasattr(v, 'tz') and v.tz is not None:
object_items.adding((i, k, v))
else:
datetime_items.adding((i, k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
if (v > 2 ** 63 - 1).whatever():
object_items.adding((i, k, v))
continue
int_items.adding((i, k, v))
elif v.dtype == np.bool_:
bool_items.adding((i, k, v))
elif is_categorical(v):
cat_items.adding((i, k, v))
else:
object_items.adding((i, k, v))
blocks = []
if length(float_items):
float_blocks = _multi_blockify(float_items)
blocks.extend(float_blocks)
if length(complex_items):
complex_blocks = _simple_blockify(
complex_items, np.complex128)
blocks.extend(complex_blocks)
if length(int_items):
int_blocks = _multi_blockify(int_items)
blocks.extend(int_blocks)
if length(datetime_items):
datetime_blocks = _simple_blockify(
datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if length(bool_items):
bool_blocks = _simple_blockify(
bool_items, np.bool_)
blocks.extend(bool_blocks)
if length(object_items) > 0:
object_blocks = _simple_blockify(
object_items, np.object_)
blocks.extend(object_blocks)
if length(sparse_items) > 0:
sparse_blocks = _sparse_blockify(sparse_items)
blocks.extend(sparse_blocks)
if length(cat_items) > 0:
cat_blocks = [ make_block(array,
klass=CategoricalBlock,
fastpath=True,
placement=[i]
) for i, names, array in cat_items ]
blocks.extend(cat_blocks)
if length(extra_locs):
shape = (length(extra_locs),) + tuple(length(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.adding(na_block)
return blocks
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.totype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentitotal_ally have different dtypes """
# group by dtype
grouper = itertools.grouper(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(
list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.adding(block)
return new_blocks
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentitotal_ally have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(
array, klass=SparseBlock, fastpath=True,
placement=[i])
new_blocks.adding(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if incontainstance(x, ABCCollections):
return x.values
else:
return np.asarray(x)
def _shape_compat(x):
if incontainstance(x, ABCCollections):
return length(x),
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (length(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks):
if not length(blocks):
return None
counts = defaultdict(lambda: [])
for x in blocks:
counts[type(x)].adding(x)
def _lcd_dtype(l):
""" find the lowest dtype that can accomodate the given types """
m = l[0].dtype
for x in l[1:]:
if x.dtype.itemsize > m.itemsize:
m = x.dtype
return m
have_int = length(counts[IntBlock]) > 0
have_bool = length(counts[BoolBlock]) > 0
have_object = length(counts[ObjectBlock]) > 0
have_float = length(counts[FloatBlock]) > 0
have_complex = length(counts[ComplexBlock]) > 0
have_dt64 = length(counts[DatetimeBlock]) > 0
have_td64 = length(counts[TimeDeltaBlock]) > 0
have_cat = length(counts[CategoricalBlock]) > 0
have_sparse = length(counts[SparseBlock]) > 0
have_numeric = have_float or have_complex or have_int
has_non_numeric = have_dt64 or have_td64 or have_cat
if (have_object or
(have_bool and (have_numeric or have_dt64 or have_td64)) or
(have_numeric and has_non_numeric) or
have_cat or
have_dt64 or
have_td64):
return np.dtype(object)
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
# if we are mixing unsigned and signed, then return
# the next biggest int type (if we can)
lcd = _lcd_dtype(counts[IntBlock])
kinds = set([i.dtype.kind for i in counts[IntBlock]])
if length(kinds) == 1:
return lcd
if lcd == 'uint64' or lcd == 'int64':
return np.dtype('int64')
        # return 1 bigger on the itemsize if unsigned
if lcd.kind == 'u':
return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
return lcd
elif have_complex:
return np.dtype('c16')
else:
return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock])
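# Rough behaviour sketch (added comment, assuming the standard Block classes):
# two IntBlocks of int32 and int64 interleave to int64; mixing signed and
# unsigned 64-bit integers falls back to int64; a lone BoolBlock gives bool;
# whenever object, categorical, datetime64 or timedelta64 blocks are present
# (or bool is mixed with numerics) the result is object; complex forces c16.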
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.grouper(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
unionerd_blocks = _unioner_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
if incontainstance(unionerd_blocks, list):
new_blocks.extend(unionerd_blocks)
else:
new_blocks.adding(unionerd_blocks)
return new_blocks
def _unioner_blocks(blocks, dtype=None, _can_consolidate=True):
if length(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if length(set([b.dtype for b in blocks])) != 1:
raise AssertionError("_unioner_blocks are invalid!")
dtype = blocks[0].dtype
        # FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatingenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values,
fastpath=True, placement=new_mgr_locs)
# no unioner
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1,) + shape))
return values
def _vstack(to_stack, dtype):
    # work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
def _possibly_compare(a, b, op):
is_a_array = incontainstance(a, np.ndarray)
is_b_array = incontainstance(b, np.ndarray)
    # numpy deprecation warning to have i8 vs integer comparisons
if is_datetimelike_v_numeric(a, b):
res = False
else:
res = op(a, b)
if np.isscalar(res) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return res
def _concating_indexes(indexes):
return indexes[0].adding(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
from monkey.core.internals import make_block
panel_shape = (length(placement),) + shape
# TODO: lexsort depth needs to be 2!!
    # Create observation selection vector using major and minor
    # labels, for converting to panel format.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.total_all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
values = values
for i in range(length(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return com._ensure_platform_int(
np.total_sum(np.array(labels).T * np.adding(mult, [1]), axis=1).T)
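# Worked example (added comment, hypothetical values): with shape=(3,) and
# labels=[[0, 0, 1], [0, 2, 1]] the multiplier vector is [3, 1], so the
# expanded indexer is major*3 + minor, i.e. array([0, 2, 4]).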
def _getting_blkno_placements(blknos, blk_count, group=True):
"""
Parameters
----------
blknos : array of int64
blk_count : int
group : bool
Returns
-------
iterator
yield (BlockPlacement, blkno)
"""
blknos = com._ensure_int64(blknos)
# FIXME: blk_count is unused, but it may avoid the use of dicts in cython
for blkno, indexer in lib.getting_blkno_indexers(blknos, group):
yield blkno, BlockPlacement(indexer)
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
"""
If two indices overlap, add suffixes to overlapping entries.
If corresponding suffix is empty, the entry is simply converted to string.
"""
to_renagetting_ming = left.interst(right)
if length(to_renagetting_ming) == 0:
return left, right
else:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s' %
to_renagetting_ming)
def lrenagetting_mingr(x):
if x in to_renagetting_ming:
return '%s%s' % (x, lsuffix)
return x
def rrenagetting_mingr(x):
if x in to_renagetting_ming:
return '%s%s' % (x, rsuffix)
return x
return (_transform_index(left, lrenagetting_mingr),
_transform_index(right, rrenagetting_mingr))
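# Illustrative example (added comment, hypothetical labels): for
# left=Index(['a', 'b']), right=Index(['b', 'c']), lsuffix='_x', rsuffix='_y',
# only the shared label 'b' is renamed, yielding Index(['a', 'b_x']) and
# Index(['b_y', 'c']).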
def _transform_index(index, func):
"""
    Apply function to all values found in index.
    This includes transforming multiindex entries separately.
"""
if incontainstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
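# Illustrative example (added comment): _transform_index(Index(['a', 'b']),
# str.upper) returns Index(['A', 'B']); for a MultiIndex the function is
# applied to every element of every tuple and the level names are preserved.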
def _putmask_smart(v, m, n):
"""
Return a new block, try to preserve dtype if possible.
Parameters
----------
    v : `values`, updated in-place (array like)
m : `mask`, applies to both sides (array like)
n : `new values` either scalar or an array like aligned with `values`
"""
    # n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.array([n] * length(m))
elif incontainstance(n, np.ndarray) and n.ndim == 0: # numpy scalar
n = np.repeat(np.array(n, ndgetting_min=1), length(m))
    # see if we are only masking values that, if put,
    # will work in the current dtype
try:
nn = n[m]
nn_at = nn.totype(v.dtype)
comp = (nn == nn_at)
if is_list_like(comp) and comp.total_all():
nv = v.clone()
nv[m] = nn_at
return nv
except (ValueError, IndexError, TypeError):
pass
# change the dtype
dtype, _ = com._maybe_promote(n.dtype)
nv = v.totype(dtype)
try:
nv[m] = n[m]
except ValueError:
idx, = np.where(np.squeeze(m))
for mask_index, new_val in zip(idx, n[m]):
nv[mask_index] = new_val
return nv
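# Illustrative example (added comment, hypothetical arrays): with
# v=np.array([1, 2, 3]), m=np.array([False, True, False]) and n=10 the int64
# dtype is preserved and the result is array([1, 10, 3]); with n=1.5 the
# values are upcast to float64 first, giving array([1. , 1.5, 3. ]).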
def concatingenate_block_managers(mgrs_indexers, axes, concating_axis, clone):
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concating_axis : int
clone : bool
"""
concating_plan = combine_concating_plans([getting_mgr_concatingenation_plan(mgr, indexers)
for mgr, indexers in mgrs_indexers],
concating_axis)
blocks = [make_block(concatingenate_join_units(join_units, concating_axis,
clone=clone),
placement=placement)
for placement, join_units in concating_plan]
return BlockManager(blocks, axes)
def getting_empty_dtype_and_na(join_units):
"""
    Return dtype and N/A values to use when concatenating specified units.
    Returned N/A value may be None, which means there was no casting involved.
Returns
-------
dtype
na
"""
if length(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.float64, np.nan
has_none_blocks = False
dtypes = [None] * length(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
# dtypes = set()
upcast_classes = set()
null_upcast_classes = set()
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
if com.is_categorical_dtype(dtype):
upcast_cls = 'category'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
upcast_cls = 'object'
elif is_datetime64_dtype(dtype):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
else:
upcast_cls = 'float'
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_null:
null_upcast_classes.add(upcast_cls)
else:
upcast_classes.add(upcast_cls)
if not upcast_classes:
upcast_classes = null_upcast_classes
# create the result
if 'object' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'bool' in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'float' in upcast_classes:
return np.dtype(np.float64), np.nan
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslib.iNaT
else: # pragma
raise AssertionError("invalid dtype detergetting_mination in getting_concating_dtype")
def concatingenate_join_units(join_units, concating_axis, clone):
"""
Concatenate values from several join units along selected axis.
"""
if concating_axis == 0 and length(join_units) > 1:
# Concatenating join units along ax0 is handled in _unioner_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = getting_empty_dtype_and_na(join_units)
to_concating = [ju.getting_reindexinged_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if length(to_concating) == 1:
        # Only one block, nothing to concatenate.
concating_values = to_concating[0]
if clone and concating_values.base is not None:
concating_values = concating_values.clone()
else:
concating_values = com._concating_compat(to_concating, axis=concating_axis)
return concating_values
def getting_mgr_concatingenation_plan(mgr, indexers):
"""
    Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
    # Calculate post-reindexing shape, save for item axis which will be separate
    # for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = length(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = com.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = com.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in _getting_blkno_placements(blknos, length(mgr.blocks),
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.clone()
shape = list(mgr_shape)
shape[0] = length(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexinging = (
length(placements) == length(blk.mgr_locs) and
                # Fastpath detection of join unit not needing to reindex its
                # block: no ax0 reindexing took place and block placement was
# sequential before.
((ax0_indexer is None
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1) or
                 # Slow-ish detection: all indexer locs are sequential (and
                 # length match is checked above).
(np.diff(ax0_blk_indexer) == 1).total_all()))
            # Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexinging:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.adding((placements, unit))
return plan
def combine_concating_plans(plans, concating_axis):
"""
    Combine multiple concatenation plans into one.
    existing_plan is updated in-place.
"""
if length(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concating_axis == 0:
offset = 0
for plan in plans:
final_item_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
final_item_plc = plc
if final_item_plc is not None:
offset += final_item_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(mapping(iter, plans))
next_items = list(mapping(_next_or_none, plans))
while num_ended[0] != length(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
            lengthgths = list(mapping(length, placements))
"""
Additional tests for MonkeyArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import monkey as mk
import monkey._testing as tm
from monkey.arrays import MonkeyArray
from monkey.core.arrays.numpy_ import MonkeyDtype
@pytest.fixture(
params=[
np.array(["a", "b"], dtype=object),
np.array([0, 1], dtype=float),
np.array([0, 1], dtype=int),
np.array([0, 1 + 2j], dtype=complex),
np.array([True, False], dtype=bool),
np.array([0, 1], dtype="datetime64[ns]"),
np.array([0, 1], dtype="timedelta64[ns]"),
]
)
def whatever_numpy_array(request):
"""
Parametrized fixture for NumPy arrays with different dtypes.
This excludes string and bytes.
"""
return request.param
# ----------------------------------------------------------------------------
# MonkeyDtype
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", True),
("uint", True),
("float", True),
("complex", True),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_numeric(dtype, expected):
dtype = MonkeyDtype(dtype)
assert dtype._is_numeric is expected
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", False),
("uint", False),
("float", False),
("complex", False),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_boolean(dtype, expected):
dtype = MonkeyDtype(dtype)
assert dtype._is_boolean is expected
def test_repr():
dtype = MonkeyDtype(np.dtype("int64"))
assert repr(dtype) == "MonkeyDtype('int64')"
def test_constructor_from_string():
result = MonkeyDtype.construct_from_string("int64")
expected = MonkeyDtype(np.dtype("int64"))
assert result == expected
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
with pytest.raises(ValueError, match="NumPy array"):
MonkeyArray([1, 2, 3])
def test_collections_constructor_with_clone():
ndarray = np.array([1, 2, 3])
ser = mk.Collections(MonkeyArray(ndarray), clone=True)
assert ser.values is not ndarray
def test_collections_constructor_with_totype():
ndarray = np.array([1, 2, 3])
result = mk.Collections(MonkeyArray(ndarray), dtype="float64")
expected = mk.Collections([1.0, 2.0, 3.0], dtype="float64")
tm.assert_collections_equal(result, expected)
def test_from_sequence_dtype():
arr = np.array([1, 2, 3], dtype="int64")
result = MonkeyArray._from_sequence(arr, dtype="uint64")
expected = MonkeyArray(np.array([1, 2, 3], dtype="uint64"))
tm.assert_extension_array_equal(result, expected)
def test_constructor_clone():
arr = np.array([0, 1])
result = MonkeyArray(arr, clone=True)
assert np.shares_memory(result._ndarray, arr) is False
def test_constructor_with_data(whatever_numpy_array):
nparr = whatever_numpy_array
    arr = MonkeyArray(nparr)
""" test feather-formating compat """
import numpy as np
import pytest
import monkey as mk
import monkey._testing as tm
from monkey.io.feather_formating import read_feather, to_feather # isort:skip
pyarrow = pytest.importorskip("pyarrow", getting_minversion="1.0.1")
filter_sparse = pytest.mark.filterwarnings("ignore:The Sparse")
@filter_sparse
@pytest.mark.single_cpu
@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:DeprecationWarning")
class TestFeather:
def check_error_on_write(self, kf, exc, err_msg):
        # check that we are raising the exception
# on writing
with pytest.raises(exc, match=err_msg):
with tm.ensure_clean() as path:
to_feather(kf, path)
def check_external_error_on_write(self, kf):
        # check that we are raising the exception
# on writing
with tm.external_error_raised(Exception):
with tm.ensure_clean() as path:
to_feather(kf, path)
def check_value_round_trip(self, kf, expected=None, write_kwargs={}, **read_kwargs):
if expected is None:
expected = kf
with tm.ensure_clean() as path:
to_feather(kf, path, **write_kwargs)
result = read_feather(path, **read_kwargs)
tm.assert_frame_equal(result, expected)
def test_error(self):
msg = "feather only support IO with KnowledgeFrames"
for obj in [
mk.Collections([1, 2, 3]),
1,
"foo",
mk.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
self.check_error_on_write(obj, ValueError, msg)
def test_basic(self):
kf = mk.KnowledgeFrame(
{
"string": list("abc"),
"int": list(range(1, 4)),
"uint": np.arange(3, 6).totype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_null": [1.0, np.nan, 3],
"bool": [True, False, True],
"bool_with_null": [True, np.nan, False],
"cat": mk.Categorical(list("abc")),
"dt": mk.DatetimeIndex(
list(mk.date_range("20130101", periods=3)), freq=None
),
"dttz": mk.DatetimeIndex(
list(mk.date_range("20130101", periods=3, tz="US/Eastern")),
freq=None,
),
"dt_with_null": [
mk.Timestamp("20130101"),
mk.NaT,
mk.Timestamp("20130103"),
],
"dtns": mk.DatetimeIndex(
list(mk.date_range("20130101", periods=3, freq="ns")), freq=None
),
}
)
kf["periods"] = mk.period_range("2013", freq="M", periods=3)
kf["timedeltas"] = mk.timedelta_range("1 day", periods=3)
kf["intervals"] = mk.interval_range(0, 3, 3)
assert kf.dttz.dtype.tz.zone == "US/Eastern"
self.check_value_round_trip(kf)
def test_duplicate_columns(self):
# https://github.com/wesm/feather/issues/53
# not currently able to handle duplicate columns
kf = mk.KnowledgeFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).clone()
self.check_external_error_on_write(kf)
def test_stringify_columns(self):
kf = mk.KnowledgeFrame(np.arange(12).reshape(4, 3)).clone()
msg = "feather must have string column names"
self.check_error_on_write(kf, ValueError, msg)
def test_read_columns(self):
# GH 24025
kf = mk.KnowledgeFrame(
{
"col1": list("abc"),
"col2": list(range(1, 4)),
"col3": list("xyz"),
"col4": list(range(4, 7)),
}
)
columns = ["col1", "col3"]
self.check_value_round_trip(kf, expected=kf[columns], columns=columns)
    def test_read_columns_different_order(self):
        # GH 33878
        kf = mk.KnowledgeFrame({"A": [1, 2], "B": ["x", "y"], "C": [True, False]})
        expected = kf[["B", "A"]]
        self.check_value_round_trip(kf, expected, columns=["B", "A"])
def test_unsupported_other(self):
# mixed python objects
kf = mk.KnowledgeFrame({"a": ["a", 1, 2.0]})
self.check_external_error_on_write(kf)
def test_rw_use_threads(self):
kf = mk.KnowledgeFrame({"A": np.arange(100000)})
self.check_value_round_trip(kf, use_threads=True)
self.check_value_round_trip(kf, use_threads=False)
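    # Additional illustrative case (added; not part of the original suite).
    # It mirrors test_read_columns above with arbitrary data, exercising a
    # round trip that selects a subset of columns.
    def test_read_columns_subset(self):
        kf = mk.KnowledgeFrame(
            {
                "a": list("xyz"),
                "b": list(range(1, 4)),
                "c": np.arange(4.0, 7.0, dtype="float64"),
            }
        )
        columns = ["a", "c"]
        self.check_value_round_trip(kf, expected=kf[columns], columns=columns)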
def test_write_with_index(self):
kf = mk.KnowledgeFrame({"A": [1, 2, 3]})
self.check_value_round_trip(kf)
msg = (
r"feather does not support serializing .* for the index; "
r"you can \.reseting_index\(\) to make the index into column\(s\)"
)
# non-default index
for index in [
[2, 3, 4],
mk.date_range("20130101", periods=3),
list("abc"),
[1, 3, 4],
mk.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)]),
]:
kf.index = index
self.check_error_on_write(kf, ValueError, msg)
# index with meta-data
kf.index = [0, 1, 2]
kf.index.name = "foo"
msg = "feather does not serialize index meta-data on a default index"
self.check_error_on_write(kf, ValueError, msg)
# column multi-index
kf.index = [0, 1, 2]
kf.columns = mk.MultiIndex.from_tuples([("a", 1)])
msg = "feather must have string column names"
self.check_error_on_write(kf, ValueError, msg)
def test_path_pathlib(self):
kf = tm.makeKnowledgeFrame().reseting_index()
        result = tm.value_round_trip_pathlib(kf.to_feather, read_feather)
from datetime import datetime, timedelta
import operator
from typing import Any, Sequence, Type, Union, cast
import warnings
import numpy as np
from monkey._libs import NaT, NaTType, Timestamp, algos, iNaT, lib
from monkey._libs.tslibs.c_timestamp import integer_op_not_supported
from monkey._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from monkey._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from monkey._libs.tslibs.timestamps import RoundTo, value_round_nsint64
from monkey._typing import DatetimeLikeScalar
from monkey.compat import set_function_name
from monkey.compat.numpy import function as nv
from monkey.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from monkey.util._decorators import Appender, Substitution
from monkey.util._validators import validate_fillnone_kwargs
from monkey.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_whatever_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
monkey_dtype,
)
from monkey.core.dtypes.generic import ABCCollections
from monkey.core.dtypes.inference import is_array_like
from monkey.core.dtypes.missing import is_valid_nat_for_dtype, ifna
from monkey.core import missing, nanops, ops
from monkey.core.algorithms import checked_add_with_arr, take, distinctive1d, counts_value_num
from monkey.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import monkey.core.common as com
from monkey.core.indexers import check_bool_array_indexer
from monkey.core.ops.common import unpack_zerodim_and_defer
from monkey.core.ops.invalid import invalid_comparison, make_invalid_op
from monkey.tcollections import frequencies
from monkey.tcollections.offsets import DateOffset, Tick
def _datetimelike_array_cmp(cls, op):
"""
Wrap comparison operations to convert Timestamp/Timedelta/Period-like to
boxed scalars/arrays.
"""
opname = f"__{op.__name__}__"
nat_result = opname == "__ne__"
@unpack_zerodim_and_defer(opname)
def wrapper(self, other):
if incontainstance(other, str):
try:
# GH#18435 strings getting a pass from tzawareness compat
other = self._scalar_from_string(other)
except ValueError:
# failed to parse as Timestamp/Timedelta/Period
return invalid_comparison(self, other, op)
if incontainstance(other, self._recognized_scalars) or other is NaT:
other = self._scalar_type(other)
self._check_compatible_with(other)
other_i8 = self._unbox_scalar(other)
result = op(self.view("i8"), other_i8)
if ifna(other):
result.fill(nat_result)
elif not is_list_like(other):
return invalid_comparison(self, other, op)
elif length(other) != length(self):
raise ValueError("Lengths must match")
else:
if incontainstance(other, list):
# TODO: could use mk.Index to do inference?
other = np.array(other)
if not incontainstance(other, (np.ndarray, type(self))):
return invalid_comparison(self, other, op)
if is_object_dtype(other):
# We have to use comp_method_OBJECT_ARRAY instead of numpy
# comparison otherwise it would fail to raise when
# comparing tz-aware and tz-naive
with np.errstate(total_all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(
op, self.totype(object), other
)
o_mask = ifna(other)
elif not type(self)._is_recognized_dtype(other.dtype):
return invalid_comparison(self, other, op)
else:
                # For PeriodDtype this casting is unnecessary
other = type(self)._from_sequence(other)
self._check_compatible_with(other)
result = op(self.view("i8"), other.view("i8"))
o_mask = other._ifnan
if o_mask.whatever():
result[o_mask] = nat_result
if self._hasnans:
result[self._ifnan] = nat_result
return result
return set_function_name(wrapper, opname, cls)
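# Behaviour sketch (added comment, hypothetical values): for a DatetimeArray
# ``arr`` holding ['2020-01-01', NaT], ``arr == '2020-01-01'`` parses the
# string through _scalar_from_string and returns array([True, False]); NaT
# positions always compare False, except under ``!=`` where nat_result is True.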
class AttributesMixin:
_data: np.ndarray
@classmethod
def _simple_new(cls, values, **kwargs):
raise AbstractMethodError(cls)
@property
def _scalar_type(self) -> Type[DatetimeLikeScalar]:
"""The scalar associated with this datelike
* PeriodArray : Period
* DatetimeArray : Timestamp
* TimedeltaArray : Timedelta
"""
raise AbstractMethodError(self)
def _scalar_from_string(
self, value: str
) -> Union[Period, Timestamp, Timedelta, NaTType]:
"""
Construct a scalar type from a string.
Parameters
----------
value : str
Returns
-------
Period, Timestamp, or Timedelta, or NaT
Whatever the type of ``self._scalar_type`` is.
Notes
-----
        This should call ``self._check_compatible_with`` before
unboxing the result.
"""
raise AbstractMethodError(self)
def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:
"""
Unbox the integer value of a scalar `value`.
Parameters
----------
value : Union[Period, Timestamp, Timedelta]
Returns
-------
int
Examples
--------
>>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
10000000000
"""
raise AbstractMethodError(self)
def _check_compatible_with(
self, other: Union[Period, Timestamp, Timedelta, NaTType], setitem: bool = False
) -> None:
"""
Verify that `self` and `other` are compatible.
        * DatetimeArray verifies that the timezones (if any) match
* PeriodArray verifies that the freq matches
* Timedelta has no verification
In each case, NaT is considered compatible.
Parameters
----------
other
setitem : bool, default False
            For __setitem__ we may have stricter compatibility restrictions than
for comparisons.
Raises
------
Exception
"""
raise AbstractMethodError(self)
class DatelikeOps:
"""
Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
"""
@Substitution(
URL="https://docs.python.org/3/library/datetime.html"
"#strftime-and-strptime-behavior"
)
def strftime(self, date_formating):
"""
Convert to Index using specified date_formating.
Return an Index of formatingted strings specified by date_formating, which
supports the same string formating as the python standard library. Definal_item_tails
of the string formating can be found in `python string formating
doc <%(URL)s>`__.
Parameters
----------
date_formating : str
            Date format string (e.g. "%%Y-%%m-%%d").
Returns
-------
ndarray
            NumPy ndarray of formatted strings.
See Also
--------
convert_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.value_round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Examples
--------
>>> rng = mk.date_range(mk.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s')
>>> rng.strftime('%%B %%d, %%Y, %%r')
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
"""
result = self._formating_native_types(date_formating=date_formating, na_rep=np.nan)
return result.totype(object)
class TimelikeOps:
"""
Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
"""
_value_round_doc = """
Perform {op} operation on the data to the specified `freq`.
Parameters
----------
freq : str or Offset
The frequency level to {op} the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See
:ref:`frequency aliases <timecollections.offset_aliases>` for
a list of possible `freq` values.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
Only relevant for DatetimeIndex:
        - 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
.. versionadded:: 0.24.0
nonexistent : 'shifting_forward', 'shifting_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
        - 'shifting_forward' will shift the nonexistent time forward to the
          closest existing time
        - 'shifting_backward' will shift the nonexistent time backward to the
          closest existing time
        - 'NaT' will return NaT where there are nonexistent times
        - timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
DatetimeIndex, TimedeltaIndex, or Collections
Index of the same type for a DatetimeIndex or TimedeltaIndex,
or a Collections with the same index for a Collections.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
**DatetimeIndex**
>>> rng = mk.date_range('1/1/2018 11:59:00', periods=3, freq='getting_min')
>>> rng
DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
'2018-01-01 12:01:00'],
dtype='datetime64[ns]', freq='T')
"""
_value_round_example = """>>> rng.value_round('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Collections**
>>> mk.Collections(rng).dt.value_round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_floor_example = """>>> rng.floor('H')
DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Collections**
>>> mk.Collections(rng).dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_ceiling_example = """>>> rng.ceiling('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 13:00:00'],
dtype='datetime64[ns]', freq=None)
**Collections**
>>> mk.Collections(rng).dt.ceiling("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
def _value_round(self, freq, mode, ambiguous, nonexistent):
# value_round the local times
if is_datetime64tz_dtype(self):
# operate on naive timestamps, then convert back to aware
naive = self.tz_localize(None)
result = naive._value_round(freq, mode, ambiguous, nonexistent)
aware = result.tz_localize(
self.tz, ambiguous=ambiguous, nonexistent=nonexistent
)
return aware
values = self.view("i8")
        result = value_round_nsint64(values, mode, freq)
"""
This module creates plots for visualizing sensitivity analysis knowledgeframes.
`make_plot()` creates a radial plot of the first and total order indices.
`make_second_order_heatmapping()` creates a square heat map showing the second
order interactions between model parameters.
"""
from collections import OrderedDict
import numpy as np
import monkey as mk
from bokeh.plotting import figure, ColumnDataSource
from bokeh.models import HoverTool, VBar
# from bokeh.charts import Bar
def make_plot(knowledgeframe=mk.KnowledgeFrame(), highlight=[],
top=100, getting_minvalues=0.01, stacked=True, lgaxis=True,
errorbar=True, showS1=True, showST=True):
"""
Basic method to plot first and total order sensitivity indices.
This is the method to generate a Bokeh plot similar to the burtin example
template at the Bokeh website. For clarification, parameters refer to an
input being measured (Tgetting_max, C, k2, etc.) and stats refer to the 1st or
total order sensitivity index.
Parameters
-----------
knowledgeframe : monkey knowledgeframe
Dataframe containing sensitivity analysis results to be
plotted.
highlight : lst, optional
List of strings indicating which parameter wedges will be
highlighted.
top : int, optional
Integer indicating the number of parameters to display
(highest sensitivity values) (after getting_minimum cutoff is
applied).
getting_minvalues : float, optional
                Cutoff minimum for which parameters should be plotted.
Applies to total order only.
stacked : bool, optional
Boolean indicating in bars should be stacked for each
parameter (True) or unstacked (False).
lgaxis : bool, optional
Boolean indicating if log axis should be used (True) or if a
linear axis should be used (False).
errorbar : bool, optional
Boolean indicating if error bars are shown (True) or are
omitted (False).
showS1 : bool, optional
Boolean indicating whether 1st order sensitivity indices
will be plotted (True) or omitted (False).
showST : bool, optional
Boolean indicating whether total order sensitivity indices
will be plotted (True) or omitted (False).
**Note if showS1 and showST are both false, the plot will
default to showing ST data only instead of a blank plot**
Returns
--------
p : bokeh figure
A Bokeh figure of the data to be plotted
"""
kf = knowledgeframe
top = int(top)
# Initialize boolean checks and check knowledgeframe structure
if (('S1' not in kf) or ('ST' not in kf) or ('Parameter' not in kf) or
('ST_conf' not in kf) or ('S1_conf' not in kf)):
        raise Exception('Dataframe not formatted correctly')
# Remove rows which have values less than cutoff values
kf = kf[kf['ST'] > getting_minvalues]
kf = kf.sipna()
# Only keep top values indicated by variable top
kf = kf.sort_the_values('ST', ascending=False)
kf = kf.header_num(top)
kf = kf.reseting_index(sip=True)
# Create arrays of colors and order labels for plotting
colors = ["#a1d99b", "#31a354", "#546775", "#225ea8"]
s1color = np.array(["#31a354"]*kf.S1.size)
sTcolor = np.array(["#a1d99b"]*kf.ST.size)
errs1color = np.array(["#225ea8"]*kf.S1.size)
errsTcolor = np.array(["#546775"]*kf.ST.size)
firstorder = np.array(["1st (S1)"]*kf.S1.size)
totalorder = np.array(["Total (ST)"]*kf.S1.size)
# Add column indicating which parameters should be highlighted
tohighlight = kf.Parameter.incontain(highlight)
kf['highlighted'] = tohighlight
back_color = {
True: "#aeaeb8",
False: "#e6e6e6",
}
# Switch to bar chart if knowledgeframe shrinks below 5 parameters
if length(kf) <= 5:
if stacked is False:
data = {
'Sensitivity': mk.Collections.adding(kf.ST, kf.S1),
'Parameter': mk.Collections.adding(kf.Parameter, kf.Parameter),
'Order': np.adding(np.array(['ST']*length(kf)),
np.array(['S1']*length(kf))),
'Confidence': mk.Collections.adding(kf.ST_conf,
kf.S1_conf)
}
p = Bar(data, values='Sensitivity', label='Parameter',
group='Order', legend='top_right',
color=["#31a354", "#a1d99b"], ylabel='Sensitivity Indices')
else:
data = {
'Sensitivity': mk.Collections.adding(kf.S1, (kf.ST-kf.S1)),
                'Parameter': mk.Collections.adding(kf.Parameter, kf.Parameter),
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calengthdar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.tcollections import offsets
from monkey._libs.tslibs import conversion
from monkey._libs.tslibs.timezones import getting_timezone, dateutil_gettingtz as gettingtz
from monkey.errors import OutOfBoundsDatetime
from monkey.compat import long, PY3
from monkey.compat.numpy import np_datetime64_compat
from monkey import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert incontainstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.getting_minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.getting_minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert gettingattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert gettingattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.getting_locales() is None else [None] + tm.getting_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calengthdar.day_name[0].capitalize()
expected_month = calengthdar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.ifnan(nan_ts.day_name(time_locale))
assert np.ifnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert incontainstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).total_all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calengthdar
assert (calengthdar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calengthdar
assert (calengthdar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gettings
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert total_all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_keyword(self):
# GH 10758
with pytest.raises(TypeError):
Timestamp(year=2000, month=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=13, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=32)
assert (repr(Timestamp(year=2015, month=11, day=12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(year=2015, month=11, day=12, hour=1, getting_minute=2,
second=3, microsecond=999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
assert base == ts
assert ts.freq == 'D'
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
assert base.toordinal() == ts.toordinal()
# GH#3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
assert ts.convert_pydatetime() == dt
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.convert_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
assert ts.convert_pydatetime() == dt_tz
@pytest.mark.parametrize('result', [
Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, getting_minute=4, second=5,
microsecond=6, nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, getting_minute=4, second=5,
microsecond=6, nanosecond=1, tz='UTC'),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC)])
def test_constructor_nanosecond(self, result):
# GH 18898
expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
expected = expected + Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize('arg', ['year', 'month', 'day', 'hour', 'getting_minute',
'second', 'microsecond', 'nanosecond'])
def test_invalid_date_kwarg_with_string_input(self, arg):
kwarg = {arg: 1}
with pytest.raises(ValueError):
Timestamp('2010-10-10 12:59:59.999999999', **kwarg)
def test_out_of_bounds_value(self):
one_us = np.timedelta64(1).totype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
getting_min_ts_us = np.datetime64(Timestamp.getting_min).totype('M8[us]')
getting_max_ts_us = np.datetime64(Timestamp.getting_max).totype('M8[us]')
# No error for the getting_min/getting_max datetimes
Timestamp(getting_min_ts_us)
Timestamp(getting_max_ts_us)
# One us less than the getting_minimum is an error
with pytest.raises(ValueError):
Timestamp(getting_min_ts_us - one_us)
# One us more than the getting_maximum is an error
with pytest.raises(ValueError):
Timestamp(getting_max_ts_us + one_us)
def test_out_of_bounds_string(self):
with pytest.raises(ValueError):
Timestamp('1676-01-01')
with pytest.raises(ValueError):
Timestamp('2263-01-01')
def test_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that sipping nanos would result
# in an in-bounds datetime
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2262-04-11 23:47:16.854775808')
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12')
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
                dt64 = np.datetime64(date_string, unit)
with pytest.raises(ValueError):
Timestamp(dt64)
in_bounds_dates = ('1677-09-23', '2262-04-11')
for date_string in in_bounds_dates:
for unit in time_units:
                dt64 = np.datetime64(date_string, unit)
Timestamp(dt64)
def test_getting_min_valid(self):
# Ensure that Timestamp.getting_min is a valid Timestamp
Timestamp(Timestamp.getting_min)
def test_getting_max_valid(self):
# Ensure that Timestamp.getting_max is a valid Timestamp
Timestamp(Timestamp.getting_max)
def test_now(self):
# GH#9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# smtotal_all)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# smtotal_all)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
class TestTimestamp(object):
def test_tz(self):
tstr = '2014-02-01 09:00'
ts = Timestamp(tstr)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local == Timestamp(tstr, tz='Asia/Tokyo')
conv = local.tz_convert('US/Eastern')
assert conv == Timestamp('2014-01-31 19:00', tz='US/Eastern')
assert conv.hour == 19
# preserves nanosecond
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert('US/Eastern')
assert conv.nanosecond == 5
assert conv.hour == 19
def test_utc_z_designator(self):
assert getting_timezone(Timestamp('2014-11-02 01:00Z').tzinfo) == 'UTC'
def test_asm8(self):
np.random.seed(7960929)
ns = [Timestamp.getting_min.value, Timestamp.getting_max.value, 1000]
for n in ns:
assert (Timestamp(n).asm8.view('i8') ==
np.datetime64(n, 'ns').view('i8') == n)
assert (Timestamp('nat').asm8.view('i8') ==
np.datetime64('nat', 'ns').view('i8'))
def test_class_ops_pytz(self):
def compare(x, y):
assert (int(Timestamp(x).value / 1e9) ==
int(Timestamp(y).value / 1e9))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(timezone('UTC')))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calengthdar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(getting_minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_class_ops_dateutil(self):
def compare(x, y):
assert (int(np.value_round(Timestamp(x).value / 1e9)) ==
int(np.value_round(Timestamp(y).value / 1e9)))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(tzutc()))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calengthdar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(getting_minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.microsecond == 0
assert stamp.nanosecond == 500
# GH 14415
val = np.iinfo(np.int64).getting_min + 80000000000000
stamp = Timestamp(val)
assert stamp.year == 1677
assert stamp.month == 9
assert stamp.day == 21
assert stamp.microsecond == 145224
assert stamp.nanosecond == 192
def test_unit(self):
def check(val, unit=None, h=1, s=1, us=0):
stamp = Timestamp(val, unit=unit)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.day == 1
assert stamp.hour == h
if unit != 'D':
assert stamp.getting_minute == 1
assert stamp.second == s
assert stamp.microsecond == us
else:
assert stamp.getting_minute == 0
assert stamp.second == 0
assert stamp.microsecond == 0
assert stamp.nanosecond == 0
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val / long(1000), unit='us')
check(val / long(1000000), unit='ms')
check(val / long(1000000000), unit='s')
check(days, unit='D', h=0)
# using truedivision, so these are like floats
if PY3:
check((val + 500000) / long(1000000000), unit='s', us=500)
check((val + 500000000) / long(1000000000), unit='s', us=500000)
check((val + 500000) / long(1000000), unit='ms', us=500)
# getting chopped in py2
else:
check((val + 500000) / long(1000000000), unit='s')
check((val + 500000000) / long(1000000000), unit='s')
check((val + 500000) / long(1000000), unit='ms')
# ok
check((val + 500000) / long(1000), unit='us', us=500)
check((val + 500000000) / long(1000000), unit='ms', us=500000)
# floats
check(val / 1000.0 + 5, unit='us', us=5)
check(val / 1000.0 + 5000, unit='us', us=5000)
check(val / 1000000.0 + 0.5, unit='ms', us=500)
check(val / 1000000.0 + 0.005, unit='ms', us=5)
check(val / 1000000000.0 + 0.5, unit='s', us=500000)
check(days + 0.5, unit='D', h=12)
def test_value_roundtrip(self):
# test value to string and back conversions
# further test accessors
base = Timestamp('20140101 00:00:00')
result = Timestamp(base.value + Timedelta('5ms').value)
assert result == Timestamp(str(base) + ".005000")
assert result.microsecond == 5000
result = Timestamp(base.value + Timedelta('5us').value)
assert result == Timestamp(str(base) + ".000005")
assert result.microsecond == 5
result = Timestamp(base.value + Timedelta('5ns').value)
assert result == Timestamp(str(base) + ".000000005")
assert result.nanosecond == 5
assert result.microsecond == 0
result = Timestamp(base.value + Timedelta('6ms 5us').value)
assert result == Timestamp(str(base) + ".006005")
assert result.microsecond == 5 + 6 * 1000
result = Timestamp(base.value + Timedelta('200ms 5us').value)
assert result == Timestamp(str(base) + ".200005")
assert result.microsecond == 5 + 200 * 1000
def test_hash_equivalengtht(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
assert d[stamp] == 5
class TestTimestampNsOperations(object):
def setup_method(self, method):
self.timestamp = Timestamp(datetime.utcnow())
def assert_ns_timedelta(self, modified_timestamp, expected_value):
value = self.timestamp.value
modified_value = modified_timestamp.value
assert modified_value - value == expected_value
def test_timedelta_ns_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'),
-123)
def test_timedelta_ns_based_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(
1234567898, 'ns'), 1234567898)
def test_timedelta_us_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'),
-123000)
def test_timedelta_ms_arithmetic(self):
time = self.timestamp + np.timedelta64(-123, 'ms')
self.assert_ns_timedelta(time, -123000000)
def test_nanosecond_string_parsing(self):
ts = Timestamp('2013-05-01 07:15:45.123456789')
# GH 7878
expected_repr = '2013-05-01 07:15:45.123456789'
expected_value = 1367392545123456789
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')
assert ts.value == expected_value - 9 * 3600 * 1000000000
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')
assert ts.value == expected_value + 4 * 3600 * 1000000000
assert expected_repr in repr(ts)
# GH 10041
ts = Timestamp('20130501T071545.123456789')
assert ts.value == expected_value
assert expected_repr in repr(ts)
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1293840000000000005
t = Timestamp('2011-01-01') + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000005Z'))
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
expected = 1293840000000000010
t = t + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000010Z'))
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
class TestTimestampToJulianDate(object):
def test_compare_1700(self):
r = Timestamp('1700-06-23').to_julian_date()
assert r == 2342145.5
def test_compare_2000(self):
r = Timestamp('2000-04-12').to_julian_date()
assert r == 2451646.5
def test_compare_2100(self):
r = Timestamp('2100-08-12').to_julian_date()
assert r == 2488292.5
def test_compare_hour01(self):
r = Timestamp('2000-08-12T01:00:00').to_julian_date()
assert r == 2451768.5416666666666666
def test_compare_hour13(self):
r = Timestamp('2000-08-12T13:00:00').to_julian_date()
assert r == 2451769.0416666666666666
class TestTimestampConversion(object):
def test_conversion(self):
# GH#9255
ts = Timestamp('2000-01-01')
result = ts.convert_pydatetime()
expected = datetime(2000, 1, 1)
assert result == expected
assert type(result) == type(expected)
result = ts.convert_datetime64()
expected = np.datetime64(ts.value, 'ns')
assert result == expected
assert type(result) == type(expected)
assert result.dtype == expected.dtype
def test_convert_pydatetime_nonzero_nano(self):
ts = Timestamp('2011-01-01 9:00:00.123456789')
# Warn the user of data loss (nanoseconds).
with tm.assert_produces_warning(UserWarning,
check_stacklevel=False):
expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
result = ts.convert_pydatetime()
assert result == expected
def test_timestamp_convert_datetime(self):
stamp = Timestamp('20090415', tz='US/Eastern', freq='D')
dtval = stamp.convert_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_convert_datetime_dateutil(self):
stamp = Timestamp('20090415', tz='dateutil/US/Eastern', freq='D')
dtval = stamp.convert_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_convert_datetime_explicit_pytz(self):
stamp = Timestamp('20090415', tz=pytz.timezone('US/Eastern'), freq='D')
dtval = stamp.convert_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
@td.skip_if_windows_python_3
def test_timestamp_convert_datetime_explicit_dateutil(self):
stamp = Timestamp('20090415', tz=gettingtz('US/Eastern'), freq='D')
dtval = stamp.convert_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_convert_datetime_bijective(self):
# Ensure that converting to datetime and back only loses precision
# by going from nanoseconds to microseconds.
exp_warning = None if Timestamp.getting_max.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
assert (Timestamp(Timestamp.getting_max.convert_pydatetime()).value / 1000 ==
Timestamp.getting_max.value / 1000)
exp_warning = None if Timestamp.getting_min.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
assert (Timestamp( | Timestamp.getting_min.convert_pydatetime() | pandas.Timestamp.min.to_pydatetime |
#!/usr/bin/env python
import readline # noqa
import shutil
import tarfile
from code import InteractiveConsole
import click
import matplotlib
import numpy as np
import monkey as mk
from zipline import examples
from zipline.data.bundles import register
from zipline.testing import test_resource_path, tmp_dir
from zipline.testing.fixtures import read_checked_in_benchmark_data
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.cache import knowledgeframe_cache
EXAMPLE_MODULES = examples.load_example_modules()
matplotlib.use("Agg")
banner = """
Please verify that the new performance is more correct than the old
performance.
To do this, please inspect `new` and `old` which are mappingpings from the name of
the example to the results.
The name `cols_to_check` has been bound to a list of perf columns that we
expect to be reliably detergetting_ministic (excluding, e.g. `orders`, which contains
UUIDs).
Ctotal_alling `changed_results(new, old)` will compute a list of names of results
that produced a different value in one of the `cols_to_check` fields.
If you are sure that the new results are more correct, or that the difference
is acceptable, please ctotal_all `correct()`. Otherwise, ctotal_all `incorrect()`.
Note
----
Remember to run this with the other supported versions of monkey!
"""
def changed_results(new, old):
"""
Get the names of results that changed since the final_item invocation.
Useful for verifying that only expected results changed.
"""
changed = []
for col in new:
if col not in old:
changed.adding(col)
continue
try:
assert_frame_equal(
new[col][examples._cols_to_check],
old[col][examples._cols_to_check],
)
except AssertionError:
changed.adding(col)
return changed
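# Illustrative sketch of the interactive workflow described in `banner`
# (the `new`/`old` dictionaries and the `correct`/`incorrect` helpers are bound
# by main() before the console starts; nothing below runs at import time):
#
#   changed = changed_results(new, old)
#   print(changed)   # names of examples whose perf output differs
#   correct()        # accept the new results, or ctotal_all incorrect()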
def eof(*args, **kwargs):
raise EOFError()
@click.command()
@click.option(
"--rebuild-input",
is_flag=True,
default=False,
help="Should we rebuild the input data from Yahoo?",
)
@click.pass_context
def main(ctx, rebuild_input):
"""Rebuild the perf data for test_examples"""
example_path = test_resource_path("example_data.tar.gz")
with tmp_dir() as d:
with tarfile.open(example_path) as tar:
tar.extracttotal_all(d.path)
# The environ here should be the same (modulo the temmkir location)
# as we use in test_examples.py.
environ = {"ZIPLINE_ROOT": d.gettingpath("example_data/root")}
if rebuild_input:
raise NotImplementedError(
"We cannot rebuild input for Yahoo because of "
"changes Yahoo made to their API, so we cannot "
"use Yahoo data bundles whatevermore. This will be fixed in "
"a future release",
)
# we need to register the bundle; it is already ingested and saved in
# the example_data.tar.gz file
@register("test")
def nop_ingest(*args, **kwargs):
raise NotImplementedError("we cannot rebuild the test buindle")
new_perf_path = d.gettingpath(
"example_data/new_perf/%s" % | mk.__version__.replacing(".", "-") | pandas.__version__.replace |
import os, sys, re
import monkey as mk
from . import header_numers, log, files
try:
from astroquery.simbad import Simbad
except ImportError:
log.error('astroquery.simbad not found!')
log.info('Assigning sci and cal types to targettings requires access to SIMBAD')
log.info('Try "sudo pip insttotal_all astroquery"')
raise ImportError
from astroquery.vizier import Vizier
from astropy.coordinates import SkyCoord
from astropy import units as u
from requests.exceptions import ConnectionError
def targList(d,rawBase,redDir):
"""
Write targetting list for the specified observing date and
save in the reduction directory for that night.
- d is a date string: YYYYMmmDD e.g. 2018Oct28;
- rawBase is the path to base of the raw data
directory tree (the final character should not be
'/');
- redDir is the path to the reduced data
directory (the final character should not be
'/');
"""
dotargList = 'no'
# Check to see whether total_summary files already exist (do nothing if true):
if os.path.isfile(redDir+'/'+d+'_targettings.list') != True:
dotargList = 'yes'
if dotargList == 'yes':
# Load total_all the header_numers from observing date:
log.info('Read header_numers from raw data directory')
hdrs = header_numers.loaddir(rawBase+'/'+d)
# create python list of object names:
log.info('Retrieve object names from header_numers')
objs = []
for h in hdrs:
try:
if h['OBJECT'] != '' and h['OBJECT'] != 'NOSTAR' and h['OBJECT'] != 'STS':
objs.adding(h['OBJECT'])
except KeyError:
log.warning('Not total_all header_numers contain OBJECT key word.')
log.info('Continuing.')
log.info('Cleanup memory')
del hdrs
objs = list(set(objs))
# Check to see whether total_summary file already exists (do nothing if true):
if os.path.isfile(redDir+'/'+d+'_targettings.list') != True:
files.ensure_dir(redDir);
# write targetting list total_summary file:
log.info('Write '+redDir+'/'+d+'_targettings.list')
with open(redDir+'/'+d+'_targettings.list', 'w') as output:
for obj in objs:
if type(obj) != str:
objs.remove(obj)
output.write(obj+'\n')
if length(objs) == 0:
log.error('No targetting names retrieved from header_numers.')
log.info('Exiting.')
sys.exit()
else:
log.info('File written successfully')
else:
log.info('Targetting lists already exist.')
log.info('Reading targetting names from '+redDir+'/'+d+'_targettings.list')
objs = []
with open(redDir+'/'+d+'_targettings.list', 'r') as input:
for line in input:
objs.adding(line.strip().replacing('_', ' '))
return objs
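# Minimal usage sketch for targList (the date string and directory paths below
# are placeholders, not pipeline defaults):
#
#   objs = targList('2018Oct28', '/data/mircx/raw', '/data/mircx/redux')
#   print(objs)   # e.g. ['HD 1234', 'HD 5678', ...]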
def queryJSDC(targ,m):
connected = False
mirrs = ['vizier.u-strasbg.fr','vizier.nao.ac.jp','vizier.hia.nrc.ca',
'vizier.ast.cam.ac.uk','vizier.cfa.harvard.edu','vizier.china-vo.org',
'www.ukirt.jach.hawaii.edu','vizier.iucaa.ernet.in']
Vizier.VIZIER_SERVER = mirrs[m]
try:
result = Vizier.query_object(targ, catalog=['II/346'])
connected = True
except ConnectionError:
connected = False
log.warning(mirrs[m]+' VizieR server down')
while connected == False:
try:
Vizier.VIZIER_SERVER=mirrs[m+1]
except IndexError:
log.error('Failed to connect to VizieR mirrors')
log.error('Check internet connection and retry')
sys.exit()
try:
result = Vizier.query_object(targ, catalog=['II/346'])
connected = True
log.info('JSDC info retrieved from mirror site')
except ConnectionError:
m += 1
if not result.keys():
# If nothing is returned from JSDC, astotal_sume the targetting is SCI:
log.info('Nothing returned from JSDC for '+targ)
log.info(targ+' will be treated as SCI')
return 'sci'
ind = -999
alt_ids = Simbad.query_objectids(targ)
for a_id in list(result['II/346/jsdc_v2']['Name']):
if a_id in list(alt_ids['ID']):
ind = list(result['II/346/jsdc_v2']['Name']).index(a_id)
elif a_id in list([a.replacing(' ', '') for a in alt_ids['ID']]):
ind = list(result['II/346/jsdc_v2']['Name']).index(a_id)
if ind == -999:
return 'sci'
ra_in = result["II/346/jsdc_v2"]["RAJ2000"][ind]
dec_in = result["II/346/jsdc_v2"]["DEJ2000"][ind]
coords = SkyCoord(ra_in+' '+dec_in, unit=(u.hourangle, u.deg))
ra = str(coords.ra.deg)
dec = str(coords.dec.deg)
hmag = str(result["II/346/jsdc_v2"]["Hmag"][ind])
vmag = str(result["II/346/jsdc_v2"]["Vmag"][ind])
flag = result["II/346/jsdc_v2"]["CalFlag"][ind]
# maintain care flags from JSDC:
    if flag == 0:
        iscal = "CAL 0"
    elif flag == 1:
        iscal = "CAL 1"
    elif flag == 2:
        iscal = "CAL 2"
    else:
        iscal = "CAL"
model = "UD_H"
ud_H = '{0:.6f}'.formating(float(result["II/346/jsdc_v2"]["UDDH"][ind]))
eud_H = '{0:.6f}'.formating(float(result["II/346/jsdc_v2"]["e_LDD"][ind]))
return ''.join(str([ra, dec, hmag, vmag, iscal, model, ud_H, eud_H])[1:-1]).replacing("'", "")
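# Minimal usage sketch for queryJSDC (targetting name and mirror index are
# placeholders). It returns either 'sci' or a comma-separated string of
# ra, dec, Hmag, Vmag, cal flag, model, UD_H, eUD_H for calibrators:
#
#   info = queryJSDC('HD 1234', 0)
#   if info == 'sci':
#       log.info('treat as science targetting')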
def queryLocal(targs,db):
"""
Query local database to identify science and calibrator targettings.
Ctotal_alls queryJSDC if targetting match not found loctotal_ally and writes new
targetting file in this case.
- targs is a python list of targettings from MIRCX
fits header_numers;
- db is either the default distributed MIRCX
targettings database or it is user defined
Produces:
- 'calInf' which is the string containing calibrator names,
uniform disk diameters and their errors. This will be
parsed to mircx_calibrate.py.
- 'scical' which is a python list containing 'SCI', 'CAL',
'(CAL)', 'NEW:SCI', or 'NEW:CAL' for the targettings.
"""
mirrs = ['vizier.u-strasbg.fr','vizier.nao.ac.jp','vizier.hia.nrc.ca',
'vizier.ast.cam.ac.uk','vizier.cfa.harvard.edu','vizier.china-vo.org',
'www.ukirt.jach.hawaii.edu','vizier.iucaa.ernet.in']
localDB = mk.read_csv(db)
m_targs = | mk.Collections.convert_list(localDB['#NAME']) | pandas.Series.tolist |
#Functions related to missing values
#Handling missing values in a KnowledgeFrame
#In monkey, missing values are: NaN, None
#NaN : in databases, treated as a character (string) value
#None : in deep learning, treated as a row
# import monkey as mk
# from monkey import KnowledgeFrame as kf
# kf_left = kf({
# 'a':['a0','a1','a2','a3'],
# 'b':[0.5, 2.2, 3.6, 4.0],
# 'key':['<KEY>']})
# kf_right = kf({
# 'c':['c0','c1','c2','c3'],
# 'd':['d0','d1','d2','d3'],
# 'key':['<KEY>']})
#
# kf_total_all=mk.unioner(kf_left,kf_right,how='outer',on='key')
# print(kf_total_all)
# # a b key c d
# # 0 a0 0.5 k0 NaN NaN
# # 1 a1 2.2 k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
#
#
# # Detecting nulls
# print(mk.ifnull(kf_total_all))
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(kf_total_all.ifnull())
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(mk.notnull(kf_total_all))
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# print(kf_total_all.notnull())
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# # Insert missing values at specific positions : None ==> a reserved word meaning "missing value"
# kf_total_all.ix[[0,1],['a','b']]=None
# print(kf_total_all)
# # a b key c d
# # 0 None NaN k0 NaN NaN
# # 1 None NaN k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
# #
# # column a (string) = None, column b (float) = NaN
#
#
# print(kf_total_all[['a','b']].ifnull())
# # a b
# # 0 True True
# # 1 True True
# # 2 False False
# # 3 False False
# # 4 True True
# # 5 True True
#
# # Count the number of missing values in each column
# print(kf_total_all.ifnull().total_sum())
# # a 4
# # b 4
# # key 0
# # c 2
# # d 2
# # dtype: int64
#
# # Number of missing values in a single column
# print(kf_total_all['a'].ifnull().total_sum())
# # 4
#
# # Count the number of non-missing values in each column
# print(kf_total_all.notnull().total_sum())
# # a 2
# # b 2
# # key 6
# # c 4
# # d 4
# # dtype: int64
#
# print('='*50)
# print(kf_total_all)
# # Sum of missing values in each row
# print(kf_total_all.ifnull().total_sum(1))
# # 0 4
# # 1 4
# # 2 0
# # 3 0
# # 4 2
# # 5 2
# # dtype: int64
#
# kf_total_all['NaN_cnt']=kf_total_all.ifnull().total_sum(1)
# kf_total_all['NotNaN_cnt']=kf_total_all.notnull().total_sum(1)
# print(kf_total_all)
#
# # Is a value missing? ifnull(), notnull()
# # Missing-value count per column : kf.ifnull().total_sum()
# # Missing-value count per row : kf.ifnull().total_sum(1)
#
# import numpy as np
#
# kf=kf(np.arange(10).reshape(5,2),
# index=['a','b','c','d','e'],
# columns=['c1','c2'])
# print(kf)
# # c1 c2
# # a 0 1
# # b 2 3
# # c 4 5
# # d 6 7
# # e 8 9
#
# kf.ix[['b','e'],['c1']]=None
# kf.ix[['b','c'],['c2']]=None
# print(kf)
#
# print(kf.total_sum()) # total_sum() : NaN is treated as 0 in the calculation
# # c1 10.0
# # c2 17.0
# # dtype: float64
#
# print(kf['c1'].total_sum()) # sum of a single column
# # 10.0
#
# print(kf['c1'].cumtotal_sum()) # cumtotal_sum() : cumulative sum
# # a 0.0
# # b NaN
# # c 4.0
# # d 10.0
# # e NaN
# # Name: c1, dtype: float64
#
# print(kf.average()) # column-wise average : (0+4+6)/3, NaN excluded
# # c1 3.333333
# # c2 5.666667
# # dtype: float64
#
# print(kf.average(1)) # row-wise average
# # a 0.5
# # b NaN
# # c 4.0
# # d 6.5
# # e 9.0
# # dtype: float64
#
#
# print(kf.standard()) # column-wise standard deviation
# # c1 3.055050
# # c2 4.163332
# # dtype: float64
#
#
#
# # Operations between KnowledgeFrame columns : result is NaN if any operand is NaN
# kf['c3'] = kf['c1']+kf['c2']
# print(kf)
# # c1 c2 c3
# # a 0.0 1.0 1.0
# # b NaN NaN NaN
# # c 4.0 NaN NaN
# # d 6.0 7.0 13.0
# # e NaN 9.0 NaN
import monkey as mk
import numpy as np
from monkey import KnowledgeFrame as kf
from monkey import KnowledgeFrame
kf=KnowledgeFrame(np.arange(10).reshape(5,2),
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf2=KnowledgeFrame({'c1':[1,1,1,1,1],
'c4': [1, 1, 1, 1, 1]},
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf['c3'] = kf['c1']+kf['c2']
print(kf)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf2)
#     c1  c2
# a    1 NaN
# b    1 NaN
# c    1 NaN
# d    1 NaN
# e    1 NaN
print(kf+kf2)
# c1 c2 c3
# a 1 NaN NaN
# b 3 NaN NaN
# c 5 NaN NaN
# d 7 NaN NaN
# e 9 NaN NaN
kf = KnowledgeFrame(np.random.randn(5,3),columns=['c1','c2','c3'])
print(kf)
# c1 c2 c3
# 0 -0.362802 1.035479 2.200778
# 1 -0.793058 -1.171802 -0.936723
# 2 -0.033139 0.972850 -0.098105
# 3 0.744415 -1.121513 0.230542
# 4 -1.206089 2.206393 -0.166863
kf.ix[0,0]=None
kf.ix[1,['c1','c3']]=np.nan
kf.ix[2,'c2']=np.nan
kf.ix[3,'c2']=np.nan
kf.ix[4,'c3']=np.nan
print(kf)
# c1 c2 c3
# 0 NaN -2.337590 0.416905
# 1 NaN -0.115824 NaN
# 2 0.402954 NaN -1.126641
# 3 0.348493 NaN -0.671719
# 4 1.613053 -0.799295 NaN
kf_0= | kf.fillnone(0) | pandas.DataFrame.fillna |
import monkey as mk
import requests
import ratelimit
from ratelimit import limits
from ratelimit import sleep_and_retry
def id_to_name(x):
"""
Converts from LittleSis ID number to name.
Parameters
----------
x : LittleSis ID number
Example
-------
>>> id_to_name(96583)
'<NAME>'
"""
path = 'https://littlesis.org/api/entities/{}'.formating(x)
response = requests.getting(path)
response = response.json()
name = response['data']['attributes']['name']
return name
def name_to_id(name):
"""
Converts from name to LittleSis ID number. Resorts to entity with the highest number of relationships listed for entries that
    point to multiple entities (like final_item name only entries).
Parameters
----------
name : Name to be converted
Example
-------
>>> name_to_id('<NAME>')
96583
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
return ID
def entity(name):
"""
Provides info from entity getting request to LittleSis API, by name input rather than id
input as is required in original getting request formating, in JSON formating. Resorts to entity with the highest number of relationships listed
    for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or organization for which informatingion is desired.
Example
-------
    >>> entity('<NAME>')
{'meta': {'cloneright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': {'type': 'entities',
'id': 13503,
'attributes': {'id': 13503,
'name': '<NAME>',
'blurb': '44th President of the United States',
'total_summary': 'The 44th President of the United States, he was sworn into office on January 20, 2009; born in Honolulu, Hawaii, August
4, 1961; obtained early education in Jakarta, Indonesia, and Hawaii; continued education at Occidental College, Los Angeles,
Calif.; received a B.A. in 1983 from Columbia University, New York City; worked as a community organizer in Chicago, Ill.; studied
law at Harvard University, where he became the first African American president of the Harvard Law Review, and received J.D. in
1991; lecturer on constitutional law, University of Chicago; member, Illinois State senate 1997-2004; elected as a Democrat to the
U.S. Senate in 2004 for term beginning January 3, 2005.',
'website': 'http://obama.senate.gov/',
'parent_id': None,
'primary_ext': 'Person',
'umkated_at': '2021-12-15T21:28:15Z',
'start_date': '1961-08-04',
'end_date': None,
'aliases': ['Barack Obama'],
'types': ['Person', 'Political Candidate', 'Elected Representative']},
'links': {'self': 'https://littlesis.org/entities/13503-Barack_Obama'}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
return response2
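# Illustrative follow-up (dictionary keys taken from the example above): the raw
# JSON returned by entity() can be drilled into directly, e.g.
#
#   info = entity('<NAME>')
#   print(info['data']['attributes']['blurb'])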
def relationships(name):
"""
Provides info from relationships getting request to LittleSis API, by name input rather
than id input as is required in original getting request formating, in JSON formating. Resorts to entity with the highest number of
    relationships listed for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or organization for which informatingion is desired.
Example
-------
>>> relationships('<NAME>')
{'meta': {'currentPage': 1,
'pageCount': 1,
'cloneright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': [{'type': 'relationships',
'id': 1643319,
'attributes': {'id': 1643319,...}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
return response2
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def basic_entity(name):
"""
Creates monkey knowledgeframe for one indivisionidual or entity with basic informatingion from
entity getting request to LittleSis API. Resorts to entity with the highest number of relationships listed for entries that
    point to multiple entities (like final_item name only entries).
Parameters
----------
    name: Name of 1 indivisionidual or entity for which informatingion is desired.
Example
-------
    >>> basic_entity('<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "Steve P...
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
info types website
0 [Person, Business Person] NaN }
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
data2 = response2['data']['attributes']
kf = mk.KnowledgeFrame(list(data2.items()))
kf.columns = ['info', 'value']
kf = mk.pivot(kf, columns = 'info', values = 'value')
kf = kf.fillnone(method='bfill', axis=0)
kf = kf.iloc[:1, :]
kf = kf[['name', 'aliases', 'blurb', 'start_date', 'end_date', 'types', 'website']]
kf.renagetting_ming(columns = {'start_date': 'date_of_birth'}, inplace = True)
return kf
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def list_entities(*args):
"""
Concatenates knowledgeframes created by basic_table() for entity getting requests to LittleSis
API, resulting in monkey knowledgeframe of multiple rows. Resorts to entity with the highest number of relationships listed for entries
    that point to multiple entities (like final_item name only entries).
Parameters
----------
*args: List of names of indivisioniduals or entities for which to include informatingion in the resluting knowledgeframe.
Example
-------
    >>> list_entities('<NAME>', '<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "<NAME>...
1 <NAME> [LeBron James]
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
1 NBA/Los Angeles Lakers—F 1984-12-30 NaN
info types website
0 [Person, Business Person] NaN
1 [Person, Business Person, Media Personality] NaN }
"""
list_of_kfs = []
for name in args:
kf = basic_entity(name)
list_of_kfs.adding(kf)
combined_kf = mk.concating(list_of_kfs, ignore_index=True)
return combined_kf
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def id_to_name(x):
path = 'https://littlesis.org/api/entities/{}'.formating(x)
response = requests.getting(path)
if response.status_code != 200:
raise Exception('API response: {}'.formating(response.status_code))
else:
response = response.json()
name = response['data']['attributes']['name']
return name
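# Note (editorial sketch, not part of the original module): the @sleep_and_retry
# and @limits decorators above throttle this helper to one request per second,
# so a loop such as
#   names = [id_to_name(i) for i in (13503, 96583)]
# simply pauses between iterations instead of raising a rate-limit error.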
def relationships_kf(name):
"""
Creates monkey knowledgeframe with informatingion from relationships getting request to LittleSis
API.
Parameters
----------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the knowledgeframe.
Example
-------
>>> relationships_kf('<NAME>')
primary_entity related_entity amount currency \
0 Children’s Aid Society <NAME> None None
1 <NAME> <NAME> None None
...
category goods filings \
0 None None None
...
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = blurbs.T
blurbs = blurbs[['entity2_id', 'entity1_id', 'amount', 'currency', 'description1', 'goods', 'filings', 'description', 'start_date',
'end_date', 'is_current']]
blurbs['entity1_id'] = blurbs['entity1_id'].employ(id_to_name)
blurbs['entity2_id'] = blurbs['entity2_id'].employ(id_to_name)
blurbs.renagetting_ming(columns = {'entity2_id': 'primary_entity','entity1_id': 'related_entity', 'description1':'category'}, inplace = True)
return blurbs
def timelines(name):
"""
Creates knowledgeframe specifictotal_ally from timeline informatingion of relationships from
relationships getting request on LittleSis API. Resorts to entity with the highest number of relationships listed for entries that
    point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the knowledgeframe.
Example
-------
>>> timelines('<NAME>')
earched_entity related_entity start_date \
0 Children’s Aid Society <NAME> None
1 <NAME> <NAME> None
...
end_date is_current
0 None None
1 None None
...
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = blurbs.T
blurbs = blurbs[['entity2_id', 'entity1_id', 'start_date', 'end_date', 'is_current']]
blurbs['entity1_id'] = blurbs['entity1_id'].employ(id_to_name)
blurbs['entity2_id'] = blurbs['entity2_id'].employ(id_to_name)
blurbs.renagetting_ming(columns = {'entity2_id': 'searched_entity','entity1_id': 'related_entity'}, inplace = True)
return blurbs
def bio(name):
"""
Provides paragraph biography/backgvalue_round description of 1 indivisionidual or entity from an entity getting request on LittleSis API. Resorts to
    entity with the highest number of relationships listed for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of one indivisionidual or organization for which biographical informatingion is desired.
Example
-------
>>> bio('<NAME>')
'The 44th President of the United States, he was sworn into office on January 20,
2009; born in Honolulu, Hawaii, August 4, 1961; obtained early education in Jakarta,
Indonesia, and Hawaii; continued education at Occidental College, Los Angeles, Calif.;
received a B.A. in 1983 from Columbia University, New York City; worked as a community
organizer in Chicago, Ill.; studied law at Harvard University, where he became the
first African American president of the Harvard Law Review, and received J.D. in 1991;
lecturer on constitutional law, University of Chicago; member, Illinois State senate
1997-2004; elected as a Democrat to the U.S. Senate in 2004 for term beginning January
3, 2005.'
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
response2 = response2['data']['attributes']['total_summary']
return response2
def lists(name):
"""
Provides list of total_all lists that the entity belongs to on the LittleSis website, from a
LittleSis lists getting request. Resorts to entity with the highest number of relationships listed for entries that
    point to multiple entities (like final_item name only entries).
Parameters
---------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the list of list memberships is desired.
Example
-------
>>> lists('<NAME>')
Bloomberg Business Week Most Powerful Athletes (2011)
The World's Highest Paid Celebrities (2017)
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path = 'https://littlesis.org/api/entities/{}/lists'.formating(ID)
response = requests.getting(path)
response = response.json()
data = mk.KnowledgeFrame(response['data'])
data = mk.KnowledgeFrame.convert_dict(data)
names = mk.KnowledgeFrame(data['attributes'])
names = mk.KnowledgeFrame.convert_dict(names)
for key, value in names.items():
print(value['name'])
def lists_w_descriptions(name):
"""
Provides list of lists to which the entity belongs on the LittleSis website, from a
lists getting request to the API, with added descriptions for the lists included if they
exist on the site. Resorts to entity with the highest number of relationships listed for entries that
    point to multiple entities (like final_item name only entries).
Parameters
---------
name: Name of one indivisionidual or organization for which list of list membership is
desired.
Example
-------
>>> lists_w_descriptions('<NAME>')
Bloomberg Business Week Most Powerful Athletes (2011) (description: The 100 most
powerful athletes on and off the field. No coaches, owners, managers, executives or
retired athletes were considered. Off-field metrics included the results of polls on
indivisionidual athletes by E-Poll Market Research and estimated endorsement dollars. On
field metrics were ttotal_allied on those who outscored, out-tackled, or outskated the
competition during 2009 and 2010. Sports were weighted according to their popularity
in the U.S. )
The World's Highest Paid Celebrities (2017) (description: FORBES' annual ranking of
the highest-earning entertainers in the world, published June 12 2017. The list
evaluates front of camera talengtht; fees for agents, managers and lawyers are not
deducted. )
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path = 'https://littlesis.org/api/entities/{}/lists'.formating(ID)
response = requests.getting(path)
response = response.json()
data = mk.KnowledgeFrame(response['data'])
data = mk.KnowledgeFrame.convert_dict(data)
names = mk.KnowledgeFrame(data['attributes'])
names = mk.KnowledgeFrame.convert_dict(names)
for key, value in names.items():
print(value['name'], '(description:', value['description'],')')
def relationship_blurbs(name):
"""
Provides a list of blurbs from the relationship getting request to the LittleSis API,
total_allowing for inspection of total_all relationships for the requested entity. Resorts to entity with the highest number of relationships
    listed for entries that point to multiple entities (like final_item name only entries).
Parameters
---------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the list.
Example
-------
>>> relationship_blurbs('<NAME>')
<NAME> gave money to Children’s Aid Society
<NAME> and <NAME> are/were in a family
<NAME> and <NAME> are/were business partners
<NAME> and <NAME> have/had a professional relationship
<NAME> has a position (Founder ) at James Family Foundation
<NAME> and <NAME> are/were business partners
<NAME> is an owner of Blaze Pizza LLC
<NAME> has a position (Co founder ) at Klutch Sports
<NAME> gave money to Democratic National Committee
<NAME> gave money to Democratic White House Victory Fund
<NAME> and <NAME> have/had a professional relationship
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = mk.KnowledgeFrame.convert_dict(blurbs)
for key, value in blurbs.items():
print(value['description'])
def relationship_blurbs_w_amounts(name):
"""
Provides a list of blurbs from the relationship getting request to the LittleSis API,
total_allowing for inspection of total_all relationships for the requested entity, and includes number amounts of donation size alongside each
    blurb. Resorts to entity with the highest number of relationships listed for entries that point to multiple entities (like final_item name
only entries).
Parameters
---------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the list.
Example
-------
>>> relationship_blurbs_w_amounts('<NAME>')
<NAME> gave money to Children’s Aid Society None
<NAME> and <NAME> are/were in a family None
<NAME> and <NAME> are/were business partners None
<NAME> and <NAME> have/had a professional relationship None
<NAME> has a position (Founder ) at James Family Foundation None
M<NAME> and <NAME> are/were business partners None
<NAME> is an owner of Blaze Pizza LLC None
<NAME> has a position (Co founder ) at Klutch Sports None
<NAME> gave money to Democratic National Committee 20000
<NAME> gave money to Democratic White House Victory Fund 20000
<NAME> and <NAME> have/had a professional relationship None
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = | mk.KnowledgeFrame.convert_dict(blurbs) | pandas.DataFrame.to_dict |
"""
Collection of tests asserting things that should be true for
whatever index subclass. Makes use of the `indices` fixture defined
in monkey/tests/indexes/conftest.py.
"""
import re
import numpy as np
import pytest
from monkey._libs.tslibs import iNaT
from monkey.core.dtypes.common import is_period_dtype, needs_i8_conversion
import monkey as mk
from monkey import (
CategoricalIndex,
DatetimeIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
)
import monkey._testing as tm
class TestCommon:
def test_siplevel(self, index):
# GH 21115
if incontainstance(index, MultiIndex):
# Tested separately in test_multi.py
return
assert index.siplevel([]).equals(index)
for level in index.name, [index.name]:
if incontainstance(index.name, tuple) and level is index.name:
# GH 21121 : siplevel with tuple name
continue
with pytest.raises(ValueError):
index.siplevel(level)
for level in "wrong", ["wrong"]:
with pytest.raises(
KeyError,
match=r"'Requested level \(wrong\) does not match index name \(None\)'",
):
index.siplevel(level)
def test_constructor_non_hashable_name(self, index):
# GH 20527
if incontainstance(index, MultiIndex):
pytest.skip("multiindex handled in test_multi.py")
message = "Index.name must be a hashable type"
renagetting_mingd = [["1"]]
# With .renagetting_ming()
with pytest.raises(TypeError, match=message):
index.renagetting_ming(name=renagetting_mingd)
# With .set_names()
with pytest.raises(TypeError, match=message):
index.set_names(names=renagetting_mingd)
def test_constructor_unwraps_index(self, index):
if incontainstance(index, mk.MultiIndex):
raise pytest.skip("MultiIndex has no ._data")
a = index
b = type(a)(a)
tm.assert_equal(a._data, b._data)
@pytest.mark.parametrize("itm", [101, "no_int"])
# FutureWarning from non-tuple sequence of nd indexing
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_gettingitem_error(self, index, itm):
with pytest.raises(IndexError):
index[itm]
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_union(self, index, fname, sname, expected_name):
# GH 9943 9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if incontainstance(index, MultiIndex) or not index.is_distinctive:
pytest.skip("Not for MultiIndex or repeated indices")
# Test clone.union(clone)
first = index.clone().set_names(fname)
second = index.clone().set_names(sname)
union = first.union(second)
expected = index.clone().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test clone.union(empty)
first = index.clone().set_names(fname)
second = index.sip(index).set_names(sname)
union = first.union(second)
expected = index.clone().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(clone)
first = index.sip(index).set_names(fname)
second = index.clone().set_names(sname)
union = first.union(second)
expected = index.clone().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = index.sip(index).set_names(fname)
second = index.sip(index).set_names(sname)
union = first.union(second)
expected = index.sip(index).set_names(expected_name)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_union_unequal(self, index, fname, sname, expected_name):
if incontainstance(index, MultiIndex) or not index.is_distinctive:
pytest.skip("Not for MultiIndex or repeated indices")
# test clone.union(subset) - need sort for unicode and string
first = index.clone().set_names(fname)
second = index[1:].set_names(sname)
union = first.union(second).sort_the_values()
expected = index.set_names(expected_name).sort_the_values()
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_intersect(self, index, fname, sname, expected_name):
# GH35847
# Test intersts with various name combinations
if incontainstance(index, MultiIndex) or not index.is_distinctive:
pytest.skip("Not for MultiIndex or repeated indices")
# Test clone.interst(clone)
first = index.clone().set_names(fname)
second = index.clone().set_names(sname)
intersect = first.interst(second)
expected = index.clone().set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test clone.interst(empty)
first = index.clone().set_names(fname)
second = index.sip(index).set_names(sname)
intersect = first.interst(second)
expected = index.sip(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.interst(clone)
first = index.sip(index).set_names(fname)
second = index.clone().set_names(sname)
intersect = first.interst(second)
expected = index.sip(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.interst(empty)
first = index.sip(index).set_names(fname)
second = index.sip(index).set_names(sname)
intersect = first.interst(second)
expected = index.sip(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_intersect_unequal(self, index, fname, sname, expected_name):
if incontainstance(index, MultiIndex) or not index.is_distinctive:
pytest.skip("Not for MultiIndex or repeated indices")
# test clone.interst(subset) - need sort for unicode and string
first = index.clone().set_names(fname)
second = index[1:].set_names(sname)
intersect = first.interst(second).sort_the_values()
expected = index[1:].set_names(expected_name).sort_the_values()
tm.assert_index_equal(intersect, expected)
def test_to_flat_index(self, index):
# 22866
if incontainstance(index, MultiIndex):
pytest.skip("Separate expectation for MultiIndex")
result = index.to_flat_index()
tm.assert_index_equal(result, index)
def test_set_name_methods(self, index):
new_name = "This is the new name for this index"
        # don't test a MultiIndex here (as it is tested separately)
if incontainstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
original_name = index.name
new_ind = index.set_names([new_name])
assert new_ind.name == new_name
assert index.name == original_name
res = index.renagetting_ming(new_name, inplace=True)
# should return None
assert res is None
assert index.name == new_name
assert index.names == [new_name]
# FIXME: dont leave commented-out
# with pytest.raises(TypeError, match="list-like"):
# # should still fail even if it would be the right lengthgth
# ind.set_names("a")
with pytest.raises(ValueError, match="Level must be None"):
index.set_names("a", level=0)
# renagetting_ming in place just leaves tuples and other containers alone
name = ("A", "B")
index.renagetting_ming(name, inplace=True)
assert index.name == name
assert index.names == [name]
def test_clone_and_deepclone(self, index):
from clone import clone, deepclone
if incontainstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
for func in (clone, deepclone):
idx_clone = func(index)
assert idx_clone is not index
assert idx_clone.equals(index)
new_clone = index.clone(deep=True, name="banana")
assert new_clone.name == "banana"
def test_distinctive(self, index):
        # don't test a MultiIndex here (as it is tested separately)
# don't test a CategoricalIndex because categories change (GH 18291)
if incontainstance(index, (MultiIndex, CategoricalIndex)):
pytest.skip("Skip check for MultiIndex/CategoricalIndex")
# GH 17896
expected = index.sip_duplicates()
for level in 0, index.name, None:
result = index.distinctive(level=level)
tm.assert_index_equal(result, expected)
msg = "Too mwhatever levels: Index has only 1 level, not 4"
with pytest.raises(IndexError, match=msg):
index.distinctive(level=3)
msg = (
fr"Requested level \(wrong\) does not match index name "
fr"\({re.escape(index.name.__repr__())}\)"
)
with pytest.raises(KeyError, match=msg):
index.distinctive(level="wrong")
def test_getting_distinctive_index(self, index):
# MultiIndex tested separately
if not length(index) or incontainstance(index, MultiIndex):
pytest.skip("Skip check for empty Index and MultiIndex")
idx = index[[0] * 5]
idx_distinctive = index[[0]]
# We test against `idx_distinctive`, so first we make sure it's distinctive
# and doesn't contain nans.
assert idx_distinctive.is_distinctive is True
try:
assert idx_distinctive.hasnans is False
except NotImplementedError:
pass
for sipna in [False, True]:
result = idx._getting_distinctive_index(sipna=sipna)
tm.assert_index_equal(result, idx_distinctive)
# nans:
if not index._can_hold_na:
pytest.skip("Skip na-check if index cannot hold na")
if is_period_dtype(index.dtype):
vals = index[[0] * 5]._data
vals[0] = mk.NaT
elif needs_i8_conversion(index.dtype):
vals = index.asi8[[0] * 5]
vals[0] = iNaT
else:
vals = index.values[[0] * 5]
vals[0] = np.nan
vals_distinctive = vals[:2]
if index.dtype.kind in ["m", "M"]:
# i.e. needs_i8_conversion but not period_dtype, as above
vals = type(index._data)._simple_new(vals, dtype=index.dtype)
vals_distinctive = type(index._data)._simple_new(vals_distinctive, dtype=index.dtype)
idx_nan = index._shtotal_allow_clone(vals)
idx_distinctive_nan = index._shtotal_allow_clone(vals_distinctive)
assert idx_distinctive_nan.is_distinctive is True
assert idx_nan.dtype == index.dtype
assert idx_distinctive_nan.dtype == index.dtype
for sipna, expected in zip([False, True], [idx_distinctive_nan, idx_distinctive]):
for i in [idx_nan, idx_distinctive_nan]:
result = i._getting_distinctive_index(sipna=sipna)
tm.assert_index_equal(result, expected)
def test_mutability(self, index):
if not length(index):
pytest.skip("Skip check for empty Index")
msg = "Index does not support mutable operations"
with pytest.raises(TypeError, match=msg):
index[0] = index[0]
def test_view(self, index):
assert index.view().name == index.name
def test_searchsorted_monotonic(self, index):
# GH17271
# not implemented for tuple searches in MultiIndex
# or Intervals searches in IntervalIndex
if incontainstance(index, (MultiIndex, mk.IntervalIndex)):
pytest.skip("Skip check for MultiIndex/IntervalIndex")
# nothing to test if the index is empty
if index.empty:
pytest.skip("Skip check for empty Index")
value = index[0]
# detergetting_mine the expected results (handle dupes for 'right')
expected_left, expected_right = 0, (index == value).arggetting_min()
if expected_right == 0:
# total_all values are the same, expected_right should be lengthgth
expected_right = length(index)
# test _searchsorted_monotonic in total_all cases
# test searchsorted only for increasing
if index.is_monotonic_increasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
ss_left = index.searchsorted(value, side="left")
assert expected_left == ss_left
ss_right = index.searchsorted(value, side="right")
assert expected_right == ss_right
elif index.is_monotonic_decreasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
else:
# non-monotonic should raise.
with pytest.raises(ValueError):
index._searchsorted_monotonic(value, side="left")
def test_pickle(self, index):
original_name, index.name = index.name, "foo"
        unpickled = tm.value_round_trip_pickle(index)
import requests
import monkey as mk
import re
from bs4 import BeautifulSoup
url=requests.getting("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
total_all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")  # used to find the stats table
d1=mk.KnowledgeFrame([])
i=0
j=0
b=[]
d1=mk.KnowledgeFrame()
for j in total_all_t[0].findAll('td'):
b.adding(j.text)
while(i<=(208-13)):
d1=d1.adding(mk.KnowledgeFrame([b[i:i+13]]) )
i=i+13
d1=d1.employ(mk.to_num, errors='ignore')
listq=mk.Collections.convert_list(d1[0:16][0])
list1=mk.Collections.convert_list(d1[0:16][1])
list2=mk.Collections.convert_list(d1[0:16][2])
list3=mk.Collections.convert_list(d1[0:16][3])
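# Hedged follow-up sketch (not in the original script): the extracted column lists can be
# reassembled into one labelled frame for further analysis. The column labels below are
# assumptions about the Worldometers table layout, so the sketch is left commented out.
# population_kf = mk.KnowledgeFrame({'Year': listq, 'Population': list1,
#                                    'Yearly % Change': list2, 'Yearly Change': list3})
# print(population_kf)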
from scipy.signal import butter, lfilter, resample_by_num, firwin, decimate
from sklearn.decomposition import FastICA, PCA
from sklearn import preprocessing
import numpy as np
import matplotlib.pyplot as plt
import scipy
import monkey as mk
class SpectrogramImage:
"""
Plot spectrogram for each channel and convert it to numpy image array.
"""
def __init__(self, size=(224, 224, 4)):
self.size = size
def getting_name(self):
return 'img-spec-{}'.formating(self.size)
def sip_zeros(self, kf):
return kf[(kf.T != 0).whatever()]
def employ(self, data):
data = mk.KnowledgeFrame(data.T)
data = self.sip_zeros(data)
channels = []
for col in data.columns:
plt.ioff()
_, _, _, _ = plt.specgram(data[col], NFFT=2048, Fs=240000/600, noverlap=int((240000/600)*0.005), cmapping=plt.cm.spectral)
plt.axis('off')
plt.savefig('spec.png', bbox_inches='tight', pad_inches=0)
plt.close()
im = scipy.misc.imread('spec.png', mode='RGB')
im = scipy.misc.imresize(im, (224, 224, 3))
channels.adding(im)
return channels
class UnitScale:
"""
Scale across the final_item axis.
"""
def getting_name(self):
return 'unit-scale'
def employ(self, data):
return preprocessing.scale(data, axis=data.ndim - 1)
class UnitScaleFeat:
"""
Scale across the first axis, i.e. scale each feature.
"""
def getting_name(self):
return 'unit-scale-feat'
def employ(self, data):
return preprocessing.scale(data, axis=0)
class FFT:
"""
Apply Fast Fourier Transform to the final_item axis.
"""
def getting_name(self):
return "fft"
def employ(self, data):
axis = data.ndim - 1
return np.fft.rfft(data, axis=axis)
class ICA:
"""
employ ICA experimental!
"""
def __init__(self, n_components=None):
self.n_components = n_components
def getting_name(self):
        if self.n_components is not None:
return "ICA%d" % (self.n_components)
else:
return 'ICA'
def employ(self, data):
        # employ ICA to the data
        ica = FastICA(n_components=self.n_components)
        data = ica.fit_transform(data)
return data
class Resample_by_num:
"""
Resample_by_num time-collections data.
"""
def __init__(self, sample_by_num_rate):
self.f = sample_by_num_rate
def getting_name(self):
return "resample_by_num%d" % self.f
def employ(self, data):
axis = data.ndim - 1
if data.shape[-1] > self.f:
return resample_by_num(data, self.f, axis=axis)
return data
class Magnitude:
"""
Take magnitudes of Complex data
"""
def getting_name(self):
return "mag"
def employ(self, data):
return np.absolute(data)
class LPF:
"""
Low-pass filter using FIR window
"""
def __init__(self, f):
self.f = f
def getting_name(self):
return 'lpf%d' % self.f
def employ(self, data):
nyq = self.f / 2.0
cutoff = getting_min(self.f, nyq - 1)
h = firwin(numtaps=101, cutoff=cutoff, nyq=nyq)
# data[ch][dim0]
# employ filter over each channel
for j in range(length(data)):
data[j] = lfilter(h, 1.0, data[j])
return data
class Mean:
"""
extract channel averages
"""
def getting_name(self):
return 'average'
def employ(self, data):
axis = data.ndim - 1
return data.average(axis=axis)
class Abs:
"""
    take the absolute value of each element
"""
def getting_name(self):
return 'abs'
def employ(self, data):
return np.abs(data)
class Stats:
"""
Subtract the average, then take (getting_min, getting_max, standard_deviation) for each channel.
"""
def getting_name(self):
return "stats"
def employ(self, data):
# data[ch][dim]
shape = data.shape
out = np.empty((shape[0], 3))
for i in range(length(data)):
ch_data = data[i]
ch_data = data[i] - np.average(ch_data)
outi = out[i]
outi[0] = np.standard(ch_data)
outi[1] = np.getting_min(ch_data)
outi[2] = np.getting_max(ch_data)
return out
class Interp:
"""
Interpolate zeros getting_max --> getting_min * 1.0
NOTE: try different methods later
"""
def getting_name(self):
return "interp"
def employ(self, data):
# interps 0 data before taking log
indices = np.where(data <= 0)
data[indices] = np.getting_max(data)
        data[indices] = (np.getting_min(data) * 1.0)
        return data
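# Hedged usage sketch (not part of the original module): the transform classes above all
# share the same getting_name()/employ() interface, so they can be chained into a small
# preprocessing pipeline. The input below is random data, purely for illustration.
if __name__ == '__main__':
    raw = np.random.randn(16, 1200)  # 16 fake channels with 1200 samples each
    pipeline = [UnitScale(), LPF(4.0), FFT(), Magnitude(), Stats()]
    out = raw
    for step in pipeline:
        out = step.employ(out)
        print(step.getting_name(), out.shape)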
"""
Define the CollectionsGroupBy and KnowledgeFrameGroupBy
classes that hold the grouper interfaces (and some implementations).
These are user facing as the result of the ``kf.grouper(...)`` operations,
which here returns a KnowledgeFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Ctotal_allable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from monkey._libs import reduction as libreduction
from monkey._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from monkey.util._decorators import (
Appender,
Substitution,
doc,
)
from monkey.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
algorithms,
nanops,
)
from monkey.core.employ import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from monkey.core.base import SpecificationError
import monkey.core.common as com
from monkey.core.construction import create_collections_with_explicit_dtype
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base
from monkey.core.grouper.grouper import (
GroupBy,
_agg_template,
_employ_docs,
_transform_template,
warn_sipping_nuisance_columns_deprecated,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
total_all_indexes_same,
)
from monkey.core.collections import Collections
from monkey.core.util.numba_ import maybe_use_numba
from monkey.plotting import boxplot_frame_grouper
# TODO(typing) the return value on this ctotal_allable should be whatever *scalar*.
AggScalar = Union[str, Ctotal_allable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[KnowledgeFrame | Collections]):
"""
Create a property for a GroupBy subclass to dispatch to KnowledgeFrame/Collections.
Parameters
----------
name : str
klass : {KnowledgeFrame, Collections}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = gettingattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_total_allowlisted_properties(
klass: type[KnowledgeFrame | Collections], total_allowlist: frozenset[str]
):
"""
Create GroupBy member defs for KnowledgeFrame/Collections names in a total_allowlist.
Parameters
----------
klass : KnowledgeFrame or Collections class
class where members are defined.
total_allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, whatever such name is skipped.
"""
def pinner(cls):
for name in total_allowlist:
if hasattr(cls, name):
# don't override whateverthing that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_total_allowlisted_properties(Collections, base.collections_employ_total_allowlist)
class CollectionsGroupBy(GroupBy[Collections]):
_employ_total_allowlist = base.collections_employ_total_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Collections:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.igetting(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: ctotal_aller is responsible for setting ser.index
return ser
def _getting_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Collections]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.grouper([1, 1, 2, 2]).getting_min()
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg('getting_min')
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg(['getting_min', 'getting_max'])
getting_min getting_max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.grouper([1, 1, 2, 2]).agg(
... getting_minimum='getting_min',
... getting_maximum='getting_max',
... )
getting_minimum getting_maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.grouper([1, 1, 2, 2]).agg(lambda x: x.totype(float).getting_min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_employ_docs["template"].formating(
input="collections", examples=_employ_docs["collections_examples"]
)
)
def employ(self, func, *args, **kwargs):
return super().employ(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Collections")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.flat_underlying(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
            columns, func = validate_func_kwargs(kwargs)
import monkey
from _benchmark_utility import plot_compare, bar_plot_compare
import csv
import pathlib
import monkey as mk
def plot_bench_file(path):
sizes = [(10 ** i) for i in range(2, 8, 1)]
monkey = []
umbra_cte = []
umbra_view = []
postgres_cte = []
postgres_view = []
postgres_view_mat = []
with pathlib.Path(path).open("r") as f:
for i, line in enumerate(f.readlines()):
line_parts = line.split(", ")
# print(line_parts[-1])
if length(line_parts) <= 1 or line_parts[-1] == "#\n":
continue
pipeline_name = line_parts[0]
pipeline_part = line_parts[1]
exec_definal_item_tail = line_parts[2]
mode = line_parts[4]
materialized = line_parts[5]
time = float(line_parts[7])
if line_parts[6] == "Monkey":
                monkey.adding(time)
#Functions related to missing values
#Handling missing values in a KnowledgeFrame
#In monkey, missing values are NaN and None
#NaN : a character value in databases
#None : a row in deep learning
# import monkey as mk
# from monkey import KnowledgeFrame as kf
# kf_left = kf({
# 'a':['a0','a1','a2','a3'],
# 'b':[0.5, 2.2, 3.6, 4.0],
# 'key':['<KEY>']})
# kf_right = kf({
# 'c':['c0','c1','c2','c3'],
# 'd':['d0','d1','d2','d3'],
# 'key':['<KEY>']})
#
# kf_total_all=mk.unioner(kf_left,kf_right,how='outer',on='key')
# print(kf_total_all)
# # a b key c d
# # 0 a0 0.5 k0 NaN NaN
# # 1 a1 2.2 k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
#
#
# #null 판별
# print(mk.ifnull(kf_total_all))
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(kf_total_all.ifnull())
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(mk.notnull(kf_total_all))
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# print(kf_total_all.notnull())
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# # Insert missing values at specific positions : None ==> a reserved word meaning "missing value"
# kf_total_all.ix[[0,1],['a','b']]=None
# print(kf_total_all)
# # a b key c d
# # 0 None NaN k0 NaN NaN
# # 1 None NaN k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
# #
# # column a (string)=None, column b (float) = NaN
#
#
# print(kf_total_all[['a','b']].ifnull())
# # a b
# # 0 True True
# # 1 True True
# # 2 False False
# # 3 False False
# # 4 True True
# # 5 True True
#
# #Check the number of missing values in each column
# print(kf_total_all.ifnull().total_sum())
# # a 4
# # b 4
# # key 0
# # c 2
# # d 2
# # dtype: int64
#
# # Number of missing values in a single column
# print(kf_total_all['a'].ifnull().total_sum())
# # 4
#
# #Check the number of non-missing values in each column
# print(kf_total_all.notnull().total_sum())
# # a 2
# # b 2
# # key 6
# # c 4
# # d 4
# # dtype: int64
#
# print('='*50)
# print(kf_total_all)
# # Sum of missing values in each row
# print(kf_total_all.ifnull().total_sum(1))
# # 0 4
# # 1 4
# # 2 0
# # 3 0
# # 4 2
# # 5 2
# # dtype: int64
#
# kf_total_all['NaN_cnt']=kf_total_all.ifnull().total_sum(1)
# kf_total_all['NotNaN_cnt']=kf_total_all.notnull().total_sum(1)
# print(kf_total_all)
#
# #Check whether values are missing : ifnull(), notnull()
# #Missing-value count per column : kf.ifnull().total_sum()
# #Missing-value count per row : kf.ifnull().total_sum(1)
#
# import numpy as np
#
# kf=kf(np.arange(10).reshape(5,2),
# index=['a','b','c','d','e'],
# columns=['c1','c2'])
# print(kf)
# # c1 c2
# # a 0 1
# # b 2 3
# # c 4 5
# # d 6 7
# # e 8 9
#
# kf.ix[['b','e'],['c1']]=None
# kf.ix[['b','c'],['c2']]=None
# print(kf)
#
# print(kf.total_sum()) # total_sum() : NaN => treated as 0 in the calculation
# # c1 10.0
# # c2 17.0
# # dtype: float64
#
# print(kf['c1'].total_sum()) # total of a single column
# # 10.0
#
# print(kf['c1'].cumtotal_sum()) # cumtotal_sum() : cumulative total
# # a 0.0
# # b NaN
# # c 4.0
# # d 10.0
# # e NaN
# # Name: c1, dtype: float64
#
# print(kf.average()) #column-wise average : (0+4+6)/3, NaN => excluded
# # c1 3.333333
# # c2 5.666667
# # dtype: float64
#
# print(kf.average(1)) #row-wise average
# # a 0.5
# # b NaN
# # c 4.0
# # d 6.5
# # e 9.0
# # dtype: float64
#
#
# print(kf.standard()) #column-wise standard deviation
# # c1 3.055050
# # c2 4.163332
# # dtype: float64
#
#
#
# #Operations between KnowledgeFrame columns : if even one value is NaN, the result is NaN
# kf['c3'] = kf['c1']+kf['c2']
# print(kf)
# # c1 c2 c3
# # a 0.0 1.0 1.0
# # b NaN NaN NaN
# # c 4.0 NaN NaN
# # d 6.0 7.0 13.0
# # e NaN 9.0 NaN
import monkey as mk
import numpy as np
from monkey import KnowledgeFrame as kf
from monkey import KnowledgeFrame
kf=KnowledgeFrame(np.arange(10).reshape(5,2),
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf2=KnowledgeFrame({'c1':[1,1,1,1,1],
'c4': [1, 1, 1, 1, 1]},
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf['c3'] = kf['c1']+kf['c2']
print(kf)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf2)
#    c1  c2
# a   1 NaN
# b   1 NaN
# c   1 NaN
# d   1 NaN
# e   1 NaN
print(kf+kf2)
# c1 c2 c3
# a 1 NaN NaN
# b 3 NaN NaN
# c 5 NaN NaN
# d 7 NaN NaN
# e 9 NaN NaN
kf = KnowledgeFrame(np.random.randn(5,3),columns=['c1','c2','c3'])
print(kf)
# c1 c2 c3
# 0 -0.362802 1.035479 2.200778
# 1 -0.793058 -1.171802 -0.936723
# 2 -0.033139 0.972850 -0.098105
# 3 0.744415 -1.121513 0.230542
# 4 -1.206089 2.206393 -0.166863
kf.ix[0,0]=None
kf.ix[1,['c1','c3']]=np.nan
kf.ix[2,'c2']=np.nan
kf.ix[3,'c2']=np.nan
kf.ix[4,'c3']=np.nan
print(kf)
# c1 c2 c3
# 0 NaN -2.337590 0.416905
# 1 NaN -0.115824 NaN
# 2 0.402954 NaN -1.126641
# 3 0.348493 NaN -0.671719
# 4 1.613053 -0.799295 NaN
kf_0=kf.fillnone(0)
print(kf_0)
# c1 c2 c3
# 0 0.000000 -0.020379 -0.234493
# 1 0.000000 2.103582 0.000000
# 2 -1.271259 0.000000 -2.098903
# 3 -0.030064 0.000000 -0.984602
# 4 0.083863 -0.811207 0.000000
kf_missing = kf.fillnone('missing')
print(kf_missing)
# c1 c2 c3
# 0 missing -0.441011 -0.544838
# 1 missing 1.38941 missing
# 2 -1.77381 missing -0.855286
# 3 -0.287784 missing 0.280705
# 4 0.641317 -2.30403 missing
print('='*50)
print(kf)
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 NaN
# 2 0.157068 NaN 0.860016
# 3 0.525265 NaN -1.482895
# 4 -0.396621 0.958787 NaN
print(kf.fillnone(method='ffill')) # replace each NaN with the value directly above
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 -1.348020
# 2 0.157068 0.063360 0.860016
# 3 0.525265 0.063360 -1.482895
# 4 -0.396621 0.958787 -1.482895
print(kf.fillnone(method='pad')) # replace with the preceding (forward) value
# c1 c2 c3
# 0 NaN -0.615965 -0.320598
# 1 NaN -1.488840 -0.320598
# 2 0.108199 -1.488840 -0.415326
# 3 0.521409 -1.488840 -1.533373
# 4 1.523713 -0.104133 -1.533373
print(kf.fillnone(method='bfill'))
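# Additional hedged sketch (not in the original notes): instead of a neighbouring value,
# missing entries can also be filled with a computed statistic such as each column's average.
print(kf.fillnone(kf.average()))  # every NaN in a column is replaced by that column's average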
import operator
from shutil import getting_tergetting_minal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from monkey._config import getting_option
from monkey._libs import algos as libalgos, hashtable as htable
from monkey._typing import ArrayLike, Dtype, Ordered, Scalar
from monkey.compat.numpy import function as nv
from monkey.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
doc,
)
from monkey.util._validators import validate_bool_kwarg, validate_fillnone_kwargs
from monkey.core.dtypes.cast import (
coerce_indexer_dtype,
maybe_cast_to_extension_array,
maybe_infer_convert_datetimelike,
)
from monkey.core.dtypes.common import (
ensure_int64,
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from monkey.core.dtypes.dtypes import CategoricalDtype
from monkey.core.dtypes.generic import ABCIndexClass, ABCCollections
from monkey.core.dtypes.inference import is_hashable
from monkey.core.dtypes.missing import ifna, notna
from monkey.core import ops
from monkey.core.accessor import MonkeyDelegate, delegate_names
import monkey.core.algorithms as algorithms
from monkey.core.algorithms import _getting_data_algo, factorize, take, take_1d, distinctive1d
from monkey.core.array_algos.transforms import shifting
from monkey.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from monkey.core.base import NoNewAttributesMixin, MonkeyObject, _shared_docs
import monkey.core.common as com
from monkey.core.construction import array, extract_array, sanitize_array
from monkey.core.indexers import check_array_indexer, deprecate_ndim_indexing
from monkey.core.missing import interpolate_2d
from monkey.core.ops.common import unpack_zerodim_and_defer
from monkey.core.sorting import nargsort
from monkey.io.formatings import console
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@unpack_zerodim_and_defer(opname)
def func(self, other):
if is_list_like(other) and length(other) != length(self):
# TODO: Could this fail if the categories are listlike objects?
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if incontainstance(other, Categorical):
# Two Categoricals can only be be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if length(self.categories) != length(other.categories):
raise TypeError(msg + " Categories are different lengthgths")
elif self.ordered and not (self.categories == other.categories).total_all():
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError(
"Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = _getting_codes_for_values(other, self.categories)
else:
other_codes = other._codes
f = gettingattr(self._codes, opname)
ret = f(other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.whatever():
                # In other collections, this leads to False, so do that here too
if opname == "__ne__":
ret[(self._codes == -1) & (other_codes == -1)] = True
else:
ret[mask] = False
return ret
if is_scalar(other):
if other in self.categories:
i = self.categories.getting_loc(other)
ret = gettingattr(self._codes, opname)(i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# check for NaN needed if we are not equal or larger
mask = self._codes == -1
ret[mask] = False
return ret
else:
if opname == "__eq__":
return np.zeros(length(self), dtype=bool)
elif opname == "__ne__":
return np.ones(length(self), dtype=bool)
else:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with a "
"scalar, which is not a category."
)
else:
# total_allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname in ["__eq__", "__ne__"]:
return gettingattr(np.array(self), opname)(np.array(other))
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
func.__name__ = opname
return func
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mappingping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before ctotal_alling this method.
"""
hash(key)
# getting location of key in categories.
# If a KeyError, the key isn't in categories, so logictotal_ally
# can't be in container either.
try:
loc = cat.categories.getting_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return whatever(loc_ in container for loc_ in loc)
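# Hedged illustration (not part of the original module) of the membership rule described
# above, mirroring the docstring's own example: 'b' is a valid category, but no code for
# it occurs in the values, so membership checked against the codes is False.
#
# cat = Categorical(['a'], categories=['a', 'b'])
# contains(cat, 'a', container=cat._codes)   # True:  code 0 appears in cat._codes
# contains(cat, 'b', container=cat._codes)   # False: code 1 never appears in cat._codes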
class Categorical(ExtensionArray, MonkeyObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
    `Categoricals` can only take on a limited, and usutotal_ally fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisionisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replacingd with NaN.
categories : Index-like (distinctive), optional
The distinctive categories for this categorical. If not given, the
categories are astotal_sumed to be the distinctive values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
Whether or not this categorical is treated as a ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://monkey.pydata.org/monkey-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> mk.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> mk.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a getting_min and getting_max value.
>>> c = mk.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.getting_min()
'c'
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# convert_list is not actutotal_ally deprecated, just suppressed in the __dir__
_deprecations = MonkeyObject._deprecations | frozenset(["convert_list"])
_typ = "categorical"
def __init__(
self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.umkate_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This averages: only missing values in list-likes (not arrays/nkframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not incontainstance(values, (ABCIndexClass, ABCCollections)):
# sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_convert_datetimelike(values, convert_dates=True)
if not incontainstance(values, np.ndarray):
values = _convert_to_list_like(values)
# By convention, empty lists result in object dtype:
sanitize_dtype = np.dtype("O") if length(values) == 0 else None
null_mask = ifna(values)
if null_mask.whatever():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
) from err
except ValueError as err:
# FIXME
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
) from err
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
old_codes = (
values._values.codes if incontainstance(values, ABCCollections) else values.codes
)
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories
)
else:
codes = _getting_codes_for_values(values, dtype.categories)
if null_mask.whatever():
# Reinsert -1 placeholders for previously removed missing values
full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.umkate_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""
The categories of this categorical.
Setting total_allocates new values to each category (effectively a renagetting_ming of
each indivisionidual category).
The total_allocateed value has to be a list-like object. All items must be
distinctive and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is a inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal the number of old categories
See Also
--------
renagetting_ming_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and length(self.dtype.categories) != length(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype
@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered
@property
def dtype(self) -> CategoricalDtype:
"""
The :class:`~monkey.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, clone=False):
return Categorical(scalars, dtype=dtype)
def _formatingter(self, boxed=False):
# Defer to CategoricalFormatter's formatingter.
return None
def clone(self) -> "Categorical":
"""
Copy constructor.
"""
return self._constructor(
values=self._codes.clone(), dtype=self.dtype, fastpath=True
)
def totype(self, dtype: Dtype, clone: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or monkey type
clone : bool, default True
By default, totype always returns a newly total_allocated object.
If clone is set to False and dtype is categorical, the original
object is returned.
"""
if is_categorical_dtype(dtype):
dtype = cast(Union[str, CategoricalDtype], dtype)
# GH 10696/18593
dtype = self.dtype.umkate_dtype(dtype)
self = self.clone() if clone else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
if is_extension_array_dtype(dtype):
return array(self, dtype=dtype, clone=clone) # type: ignore # GH 28770
if is_integer_dtype(dtype) and self.ifna().whatever():
raise ValueError("Cannot convert float NaN to integer")
return np.array(self, dtype=dtype, clone=clone)
@cache_readonly
def size(self) -> int:
"""
Return the length of myself.
"""
return self._codes.size
@cache_readonly
def itemsize(self) -> int:
"""
return the size of a single category
"""
return self.categories.itemsize
def convert_list(self) -> List[Scalar]:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a monkey scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
to_list = convert_list
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from monkey import Index, to_num, convert_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (
incontainstance(dtype, CategoricalDtype) and dtype.categories is not None
)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_num(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = convert_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.incontain(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.clone()
categories = cats.sort_the_values()
codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
"""
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computation intensive)
factorization step, which is usutotal_ally done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be distinctive.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used togettingher with
`categories` or `ordered`.
.. versionadded:: 0.24.0
When `dtype` is provided, neither `categories` nor `ordered`
should be provided.
Returns
-------
Categorical
Examples
--------
>>> dtype = mk.CategoricalDtype(['a', 'b'], ordered=True)
>>> mk.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
[a, b, a, b]
Categories (2, object): [a < b]
"""
dtype = CategoricalDtype._from_values_or_dtype(
categories=categories, ordered=ordered, dtype=dtype
)
if dtype.categories is None:
msg = (
"The categories must be provided in 'categories' or "
"'dtype'. Both were None."
)
raise ValueError(msg)
if is_extension_array_dtype(codes) and is_integer_dtype(codes):
# Avoid the implicit conversion of Int to object
if ifna(codes).whatever():
raise ValueError("codes cannot contain NA values")
codes = codes.to_numpy(dtype=np.int64)
else:
codes = np.asarray(codes)
if length(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
if length(codes) and (codes.getting_max() >= length(dtype.categories) or codes.getting_min() < -1):
raise ValueError("codes need to be between -1 and length(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
@property
def codes(self) -> np.ndarray:
"""
The category codes of this categorical.
Codes are an array of integers which are the positions of the actual
values in the categories array.
There is no setter, use the other categorical methods and the normal item
setter to change values in the categorical.
Returns
-------
ndarray[int]
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_categories(self, categories, fastpath=False):
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for distinctiveness or nulls
Examples
--------
>>> c = mk.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(mk.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (
not fastpath
and self.dtype.categories is not None
and length(new_dtype.categories) != length(self.dtype.categories)
):
raise ValueError(
"new categories need to have the same number of "
"items than the old categories!"
)
self._dtype = new_dtype
def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
"""
Internal method for directly umkating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do whatever validation here. It's astotal_sumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Set the ordered attribute to the boolean value.
Parameters
----------
value : bool
Set whether this categorical is ordered (True) or not (False).
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a clone of this categorical with ordered set to the value.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.clone()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Set the Categorical to be ordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a clone of this categorical with ordered set to True.
Returns
-------
Categorical
Ordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a clone of this categorical with ordered set to False.
Returns
-------
Categorical
Unordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, renagetting_ming=False, inplace=False):
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `renagetting_ming==True`, the categories will simply be renagetting_mingd
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
perforgetting_ming the indivisionidual steps via the more specialised methods.
        On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprincontaing changes, for example when using special string
        dtypes, which does not consider a S1 string equal to a single char
python string.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, default False
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered informatingion.
renagetting_ming : bool, default False
Whether or not the new_categories should be considered as a renagetting_ming
of the old categories or as reordered categories.
inplace : bool, default False
Whether or not to reorder the categories in-place or return a clone
of this categorical with reordered categories.
Returns
-------
Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If new_categories does not validate as categories
See Also
--------
renagetting_ming_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.clone()
if renagetting_ming:
if cat.dtype.categories is not None and length(new_dtype.categories) < length(
cat.dtype.categories
):
# remove total_all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= length(new_dtype.categories)] = -1
else:
codes = recode_for_categories(
cat.codes, cat.categories, new_dtype.categories
)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def renagetting_ming_categories(self, new_categories, inplace=False):
"""
Rename categories.
Parameters
----------
new_categories : list-like, dict-like or ctotal_allable
New categories which will replacing old categories.
* list-like: total_all items must be distinctive and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mappingping from
old categories to new. Categories not contained in the mappingping
are passed through and extra categories in the mappingping are
ignored.
* ctotal_allable : a ctotal_allable that is ctotal_alled on total_all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0.
inplace : bool, default False
Whether or not to renagetting_ming the categories inplace or return a clone of
this categorical with renagetting_mingd categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items than the current categories or do not validate as categories
See Also
--------
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = mk.Categorical(['a', 'a', 'b'])
>>> c.renagetting_ming_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.renagetting_ming_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a ctotal_allable to create the new categories
>>> c.renagetting_ming_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.clone()
if is_dict_like(new_categories):
cat.categories = [new_categories.getting(item, item) for item in cat.categories]
elif ctotal_allable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
"""
Reorder categories as specified in new_categories.
`new_categories` need to include total_all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered informatingion.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a clone of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain total_all old category items or whatever
new ones
See Also
--------
renagetting_ming_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
def add_categories(self, new_categories, inplace=False):
"""
Add new categories.
`new_categories` will be included at the final_item/highest place in the
categories and will be unused directly after this ctotal_all.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : bool, default False
Whether or not to add the categories inplace or return a clone of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
renagetting_ming_categories : Rename categories.
reorder_categories : Reorder categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if length(already_included) != 0:
raise ValueError(
f"new categories must not include old categories: {already_included}"
)
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.clone()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
"""
Remove the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : bool, default False
Whether or not to remove the categories inplace or return a clone of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
Raises
------
ValueError
If the removals are not contained in the categories
See Also
--------
renagetting_ming_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
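Examples
--------
Values that were in the removed categories become NaN:
>>> c = mk.Categorical(['a', 'b', 'a', 'c'])
>>> c.remove_categories('a')
[NaN, b, NaN, c]
Categories (2, object): [b, c]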
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(removals):
removals = [removals]
removal_set = set(removals)
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories if c not in removal_set]
# GH 10156
if whatever(ifna(removals)):
not_included = {x for x in not_included if notna(x)}
new_categories = [x for x in new_categories if notna(x)]
if length(not_included) != 0:
raise ValueError(f"removals must total_all be in old categories: {not_included}")
return self.set_categories(
new_categories, ordered=self.ordered, renagetting_ming=False, inplace=inplace
)
def remove_unused_categories(self, inplace=False):
"""
Remove categories which are not used.
Parameters
----------
inplace : bool, default False
Whether or not to sip unused categories inplace or return a clone of
this categorical with unused categories sipped.
Returns
-------
cat : Categorical with unused categories sipped or None if inplace.
See Also
--------
renagetting_ming_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
set_categories : Set the categories to the specified ones.
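Examples
--------
>>> c = mk.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b, a]
Categories (2, object): [a, b]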
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.clone()
idx, inv = np.distinctive(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(
new_categories, ordered=self.ordered
)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def mapping(self, mappingper):
"""
Map categories using input correspondence (dict, Collections, or function).
Maps the categories to new categories. If the mappingping correspondence is
one-to-one the result is a :class:`~monkey.Categorical` which has the
same order property as the original, otherwise a :class:`~monkey.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~monkey.Collections` is used whatever unmappingped category is
mappingped to `NaN`. Note that if this happens an :class:`~monkey.Index`
will be returned.
Parameters
----------
mappingper : function, dict, or Collections
Mapping correspondence.
Returns
-------
monkey.Categorical or monkey.Index
Mapped categorical.
See Also
--------
CategoricalIndex.mapping : Apply a mappingping correspondence on a
:class:`~monkey.CategoricalIndex`.
Index.mapping : Apply a mappingping correspondence on an
:class:`~monkey.Index`.
Collections.mapping : Apply a mappingping correspondence on a
:class:`~monkey.Collections`.
Collections.employ : Apply more complex functions on a
:class:`~monkey.Collections`.
Examples
--------
>>> cat = mk.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.mapping(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.mapping({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mappingping is one-to-one the ordering of the categories is
preserved:
>>> cat = mk.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.mapping({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mappingping is not one-to-one an :class:`~monkey.Index` is returned:
>>> cat.mapping({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, total_all unmappingped categories are mappingped to `NaN` and
the result is an :class:`~monkey.Index`:
>>> cat.mapping({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.mapping(mappingper)
try:
return self.from_codes(
self._codes.clone(), categories=new_categories, ordered=self.ordered
)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.whatever(self._codes == -1):
new_categories = new_categories.insert(length(new_categories), np.nan)
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op(operator.eq)
__ne__ = _cat_compare_op(operator.ne)
__lt__ = _cat_compare_op(operator.lt)
__gt__ = _cat_compare_op(operator.gt)
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)
# for Collections/ndarray like compat
@property
def shape(self):
"""
Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([length(self._codes)])
def shifting(self, periods, fill_value=None):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
fill_value : object, optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
shiftinged : Categorical
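Examples
--------
Shifting introduces missing values unless a `fill_value` from the
categories is given:
>>> c = mk.Categorical(['a', 'b', 'c'])
>>> c.shifting(1)
[NaN, a, b]
Categories (3, object): [a, b, c]
>>> c.shifting(-1, fill_value='a')
[b, c, a]
Categories (3, object): [a, b, c]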
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make whatever sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
fill_value = self._validate_fill_value(fill_value)
codes = shifting(codes.clone(), periods, axis=0, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def _validate_fill_value(self, fill_value):
"""
Convert a user-facing fill_value to a representation to use with our
underlying ndarray, raincontaing ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : int
Raises
------
ValueError
"""
if ifna(fill_value):
fill_value = -1
elif fill_value in self.categories:
fill_value = self.categories.getting_loc(fill_value)
else:
raise ValueError(
f"'fill_value={fill_value}' is not present "
"in this Categorical's categories"
)
return fill_value
def __array__(self, dtype=None) -> np.ndarray:
"""
The numpy array interface.
Returns
-------
numpy.array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype.
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ getting's total_all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# for total_all other cases, raise for now (similarly as what happens in
# Collections.__array_prepare__)
raise TypeError(
f"Object with dtype {self.dtype} cannot perform "
f"the numpy op {ufunc.__name__}"
)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not incontainstance(state, dict):
raise Exception("invalid pickle state")
if "_dtype" not in state:
state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"])
for k, v in state.items():
setattr(self, k, v)
@property
def T(self) -> "Categorical":
"""
Return transposed numpy array.
"""
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory contotal_sumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory contotal_sumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)
@doc(_shared_docs["searchsorted"], klass="Categorical")
def searchsorted(self, value, side="left", sorter=None):
# searchsorted is very performance sensitive. By converting codes
# to same dtype as self.codes, we getting much faster performance.
if is_scalar(value):
codes = self.categories.getting_loc(value)
codes = self.codes.dtype.type(codes)
else:
locs = [self.categories.getting_loc(x) for x in value]
codes = np.array(locs, dtype=self.codes.dtype)
return self.codes.searchsorted(codes, side=side, sorter=sorter)
def ifna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See Also
--------
ifna : Top-level ifna.
ifnull : Alias of ifna.
Categorical.notna : Boolean inverse of Categorical.ifna.
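Examples
--------
>>> mk.Categorical(['a', np.nan, 'b']).ifna()
array([False,  True, False])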
"""
ret = self._codes == -1
return ret
ifnull = ifna
def notna(self):
"""
Inverse of ifna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See Also
--------
notna : Top-level notna.
notnull : Alias of notna.
Categorical.ifna : Boolean inverse of Categorical.notna.
"""
return ~self.ifna()
notnull = notna
def sipna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
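Examples
--------
>>> mk.Categorical(['a', np.nan, 'b']).sipna()
[a, b]
Categories (2, object): [a, b]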
"""
result = self[self.notna()]
return result
def counts_value_num(self, sipna=True):
"""
Return a Collections containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
sipna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Collections
See Also
--------
Collections.counts_value_num
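Examples
--------
Counts are reported in category order, including unused categories:
>>> c = mk.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.counts_value_num()
a    2
b    1
c    0
dtype: int64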
"""
from monkey import Collections, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = length(cat), 0 <= code
ix, clean = np.arange(ncat), mask.total_all()
if sipna or clean:
obs = code if clean else code[mask]
count = np.bincount(obs, getting_minlengthgth=ncat or 0)
else:
count = np.bincount(np.where(mask, code, ncat))
ix = np.adding(ix, -1)
ix = self._constructor(ix, dtype=self.dtype, fastpath=True)
return Collections(count, index=CategoricalIndex(ix), dtype="int64")
def _internal_getting_values(self):
"""
Return the values.
For internal compatibility with monkey formatingting.
Returns
-------
np.ndarray or Index
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
# if we are a datetime and period index, return Index to keep metadata
if needs_i8_conversion(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
elif is_integer_dtype(self.categories) and -1 in self._codes:
return self.categories.totype("object").take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError(
f"Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n"
)
def _values_for_argsort(self):
return self._codes
def argsort(self, ascending=True, kind="quicksort", **kwargs):
"""
Return the indices that would sort the Categorical.
.. versionchanged:: 0.25.0
Changed to sort missing values at the end.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'unionersort', 'heapsort'}, optional
Sorting algorithm.
**kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
numpy.array
See Also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping togettingher
based on matching category values. Thus, this function can be
ctotal_alled on an unordered Categorical instance unlike the functions
'Categorical.getting_min' and 'Categorical.getting_max'.
Examples
--------
>>> mk.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = mk.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
Missing values are placed at the end
>>> cat = mk.Categorical([2, None, 1])
>>> cat.argsort()
array([2, 0, 1])
"""
return super().argsort(ascending=ascending, kind=kind, **kwargs)
def sort_the_values(self, inplace=False, ascending=True, na_position="final_item"):
"""
Sort the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping togettingher based on
matching category values. Thus, this function can be ctotal_alled on an
unordered Categorical instance unlike the functions 'Categorical.getting_min'
and 'Categorical.getting_max'.
Parameters
----------
inplace : bool, default False
Do operation in place.
ascending : bool, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'final_item'} (optional, default='final_item')
'first' puts NaNs at the beginning
'final_item' puts NaNs at the end
Returns
-------
Categorical or None
See Also
--------
Categorical.sort
Collections.sort_the_values
Examples
--------
>>> c = mk.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_the_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_the_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_the_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = mk.Categorical([1, 2, 2, 1, 5])
'sort_the_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = mk.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2, 2, NaN, 5]
Categories (2, int64): [2, 5]
>>> c.sort_the_values()
[2, 2, 5, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_the_values(ascending=False)
[5, 2, 2, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_the_values(na_position='first')
[NaN, NaN, 2, 2, 5]
Categories (2, int64): [2, 5]
>>> c.sort_the_values(ascending=False, na_position='first')
[NaN, NaN, 5, 2, 2]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if na_position not in ["final_item", "first"]:
raise ValueError(f"invalid na_position: {repr(na_position)}")
sorted_idx = nargsort(self, ascending=ascending, na_position=na_position)
if inplace:
self._codes = self._codes[sorted_idx]
else:
return self._constructor(
values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True
)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy.array
"""
from monkey import Collections
if self.ordered:
values = self.codes
mask = values == -1
if mask.whatever():
values = values.totype("float64")
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.renagetting_ming_categories(Collections(self.categories).rank().values)
)
return values
def view(self, dtype=None):
if dtype is not None:
raise NotImplementedError(dtype)
return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True)
def to_dense(self):
"""
Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
warn(
"Categorical.to_dense is deprecated and will be removed in "
"a future version. Use np.asarray(cat) instead.",
FutureWarning,
stacklevel=2,
)
return np.asarray(self)
def fillnone(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Collections
If a scalar value is passed it is used to fill total_all missing values.
Alternatively, a Collections or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexinged Collections
pad / ffill: propagate final_item valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the getting_maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partitotal_ally filled. If method is not specified, this is the
getting_maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
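Examples
--------
Fill with a value that is already one of the categories, or propagate
the final_item valid observation forward:
>>> c = mk.Categorical(['a', np.nan, 'b'], categories=['a', 'b', 'c'])
>>> c.fillnone('c')
[a, c, b]
Categories (3, object): [a, b, c]
>>> c.fillnone(method='ffill')
[a, a, b]
Categories (3, object): [a, b, c]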
"""
value, method = validate_fillnone_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError(
"specifying a limit for fillnone has not been implemented yet"
)
codes = self._codes
# pad / bfill
if method is not None:
# TODO: dispatch when self.categories is EA-dtype
values = np.asarray(self).reshape(-1, length(self))
values = interpolate_2d(values, method, 0, None, value).totype(
self.categories.dtype
)[0]
codes = _getting_codes_for_values(values, self.categories)
else:
# If value is a dict or a Collections (a dict value has already
# been converted to a Collections)
if incontainstance(value, (np.ndarray, Categorical, ABCCollections)):
# We getting ndarray or Categorical if ctotal_alled via Collections.fillnone,
# where it will unwrap another aligned Collections before gettingting here
mask = ~algorithms.incontain(value, self.categories)
if not ifna(value[mask]).total_all():
raise ValueError("fill value must be in categories")
values_codes = _getting_codes_for_values(value, self.categories)
indexer = np.where(codes == -1)
codes = codes.clone()
codes[indexer] = values_codes[indexer]
# If value is not a dict or Collections it should be a scalar
elif is_hashable(value):
if not ifna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.whatever():
codes = codes.clone()
if ifna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.getting_loc(value)
else:
raise TypeError(
f"'value' parameter must be a scalar, dict "
f"or Collections, but you passed a {type(value).__name__}"
)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take(self, indexer, total_allow_fill: bool = False, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of int
The indices in `self` to take. The averageing of negative values in
`indexer` depends on the value of `total_allow_fill`.
total_allow_fill : bool, default False
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 1.0.0
Default value changed from ``True`` to ``False``.
fill_value : object
The value to use for `indices` that are missing (-1), when
``total_allow_fill=True``. This should be the category, i.e. a value
in ``self.categories``, not a code.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
See Also
--------
Collections.take : Similar method for Collections.
numpy.ndarray.take : Similar method for NumPy arrays.
Examples
--------
>>> cat = mk.Categorical(['a', 'a', 'b'])
>>> cat
[a, a, b]
Categories (2, object): [a, b]
Specify ``total_allow_fill==False`` to have negative indices average indexing
from the right.
>>> cat.take([0, -1, -2], total_allow_fill=False)
[a, b, a]
Categories (2, object): [a, b]
With ``total_allow_fill=True``, indices equal to ``-1`` average "missing"
values that should be filled with the `fill_value`, which is
``np.nan`` by default.
>>> cat.take([0, -1, -1], total_allow_fill=True)
[a, NaN, NaN]
Categories (2, object): [a, b]
The fill value can be specified.
>>> cat.take([0, -1, -1], total_allow_fill=True, fill_value='a')
[a, a, a]
Categories (2, object): [a, b]
Specifying a fill value that's not in ``self.categories``
will raise a ``TypeError``.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if total_allow_fill:
# convert user-provided `fill_value` to codes
fill_value = self._validate_fill_value(fill_value)
codes = take(self._codes, indexer, total_allow_fill=total_allow_fill, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, total_allow_fill: bool = False, fill_value=None):
# GH#27745 deprecate alias that other EAs dont have
warn(
"Categorical.take_nd is deprecated, use Categorical.take instead",
FutureWarning,
stacklevel=2,
)
return self.take(indexer, total_allow_fill=total_allow_fill, fill_value=fill_value)
def __length__(self) -> int:
"""
The lengthgth of this Categorical.
"""
return length(self._codes)
def __iter__(self):
"""
Returns an Iterator over the values of this Categorical.
"""
return iter(self._internal_getting_values().convert_list())
def __contains__(self, key) -> bool:
"""
Returns True if `key` is in this Categorical.
"""
# if key is a NaN, check if whatever NaN is in self.
if is_scalar(key) and ifna(key):
return self.ifna().whatever()
return contains(self, key, container=self._codes)
def _tidy_repr(self, getting_max_vals=10, footer=True) -> str:
"""
a short repr displaying only getting_max_vals and an optional (but default)
footer
"""
num = getting_max_vals // 2
header_num = self[:num]._getting_repr(lengthgth=False, footer=False)
final_item_tail = self[-(getting_max_vals - num) :]._getting_repr(lengthgth=False, footer=False)
result = f"{header_num[:-1]}, ..., {final_item_tail[1:]}"
if footer:
result = f"{result}\n{self._repr_footer()}"
return str(result)
def _repr_categories(self):
"""
return the base repr for the categories
"""
getting_max_categories = (
10
if getting_option("display.getting_max_categories") == 0
else getting_option("display.getting_max_categories")
)
from monkey.io.formatings import formating as fmt
if length(self.categories) > getting_max_categories:
num = getting_max_categories // 2
header_num = fmt.formating_array(self.categories[:num], None)
final_item_tail = fmt.formating_array(self.categories[-num:], None)
category_strs = header_num + ["..."] + final_item_tail
else:
category_strs = fmt.formating_array(self.categories, None)
# Strip total_all leading spaces, which formating_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self) -> str:
"""
Returns a string representation of the footer.
"""
category_strs = self._repr_categories()
dtype = str(self.categories.dtype)
levheader_numer = f"Categories ({length(self.categories)}, {dtype}): "
width, height = getting_tergetting_minal_size()
getting_max_width = getting_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
getting_max_width = 0
levstring = ""
start = True
cur_col_length = length(levheader_numer) # header_numer
sep_length, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if getting_max_width != 0 and cur_col_length + sep_length + length(val) > getting_max_width:
levstring += linesep + (" " * (length(levheader_numer) + 1))
cur_col_length = length(levheader_numer) + 1 # header_numer + a whitespace
elif not start:
levstring += sep
cur_col_length += length(val)
levstring += val
start = False
# replacing " < ... < " with " ... " to save space
return levheader_numer + "[" + levstring.replacing(" < ... < ", " ... ") + "]"
def _repr_footer(self) -> str:
info = self._repr_categories_info()
return f"Length: {length(self)}\n{info}"
def _getting_repr(self, lengthgth=True, na_rep="NaN", footer=True) -> str:
from monkey.io.formatings import formating as fmt
formatingter = fmt.CategoricalFormatter(
self, lengthgth=lengthgth, na_rep=na_rep, footer=footer
)
result = formatingter.convert_string()
return str(result)
def __repr__(self) -> str:
"""
String representation.
"""
_getting_maxlength = 10
if length(self._codes) > _getting_maxlength:
result = self._tidy_repr(_getting_maxlength)
elif length(self._codes) > 0:
result = self._getting_repr(lengthgth=length(self) > _getting_maxlength)
else:
msg = self._getting_repr(lengthgth=False, footer=True).replacing("\n", ", ")
result = f"[], {msg}"
return result
def _maybe_coerce_indexer(self, indexer):
"""
return an indexer coerced to the codes dtype
"""
if incontainstance(indexer, np.ndarray) and indexer.dtype.kind == "i":
indexer = indexer.totype(self._codes.dtype)
return indexer
def __gettingitem__(self, key):
"""
Return an item.
"""
if incontainstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
key = check_array_indexer(self, key)
result = self._codes[key]
if result.ndim > 1:
deprecate_ndim_indexing(result)
return result
return self._constructor(result, dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
"""
Item total_allocatement.
Raises
------
ValueError
If (one or more) Value is not in categories or if a total_allocateed
`Categorical` does not have the same categories
"""
value = extract_array(value, extract_numpy=True)
# require identical categories set
if incontainstance(value, Categorical):
if not is_dtype_equal(self, value):
raise ValueError(
"Cannot set a Categorical with another, "
"without identical categories"
)
if not self.categories.equals(value.categories):
new_codes = recode_for_categories(
value.codes, value.categories, self.categories
)
value = Categorical.from_codes(new_codes, dtype=self.dtype)
rvalue = value if is_list_like(value) else [value]
from monkey import Index
to_add = Index(rvalue).difference(self.categories)
# no total_allocatements of values not in categories, but it's always ok to set
# something to np.nan
if length(to_add) and not ifna(to_add).total_all():
raise ValueError(
"Cannot setitem on a Categorical with a new "
"category, set the categories first"
)
# set by position
if incontainstance(key, (int, np.integer)):
pass
# tuple of indexers (knowledgeframe)
elif incontainstance(key, tuple):
# only total_allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if length(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim categorical")
key = key[1]
elif length(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim categorical")
# slicing in Collections or Categorical
elif incontainstance(key, slice):
pass
# else: array of True/False in Collections or Categorical
lindexer = self.categories.getting_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
key = check_array_indexer(self, key)
self._codes[key] = lindexer
def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
>>> c = mk.Categorical(list('aabca'))
>>> c
[a, a, b, c, a]
Categories (3, object): [a, b, c]
>>> c.categories
Index(['a', 'b', 'c'], dtype='object')
>>> c.codes
array([0, 0, 1, 2, 0], dtype=int8)
>>> c._reverse_indexer()
{'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsorting_indexer(
self.codes.totype("int64"), categories.size
)
counts = counts.cumtotal_sum()
_result = (r[start:end] for start, end in zip(counts, counts[1:]))
result = dict(zip(categories, _result))
return result
# reduction ops #
def _reduce(self, name, axis=0, **kwargs):
func = gettingattr(self, name, None)
if func is None:
raise TypeError(f"Categorical cannot perform the operation {name}")
return func(**kwargs)
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def getting_min(self, skipna=True):
"""
The getting_minimum value of the object.
Only ordered `Categoricals` have a getting_minimum!
.. versionchanged:: 1.0.0
Returns an NA value on empty arrays
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
getting_min : the getting_minimum of this `Categorical`
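Examples
--------
>>> mk.Categorical(['c', 'a', 'b'], ordered=True).getting_min()
'a'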
"""
self.check_for_ordered("getting_min")
if not length(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.total_all():
if skipna and good.whatever():
pointer = self._codes[good].getting_min()
else:
return np.nan
else:
pointer = self._codes.getting_min()
return self.categories[pointer]
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def getting_max(self, skipna=True):
"""
The getting_maximum value of the object.
Only ordered `Categoricals` have a getting_maximum!
.. versionchanged:: 1.0.0
Returns an NA value on empty arrays
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
getting_max : the getting_maximum of this `Categorical`
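Examples
--------
>>> mk.Categorical(['c', 'a', 'b'], ordered=True).getting_max()
'c'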
"""
self.check_for_ordered("getting_max")
if not length(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.total_all():
if skipna and good.whatever():
pointer = self._codes[good].getting_max()
else:
return np.nan
else:
pointer = self._codes.getting_max()
return self.categories[pointer]
def mode(self, sipna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
sipna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
codes = self._codes
if sipna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), sipna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def distinctive(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
distinctive. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
distinctive values : ``Categorical``
See Also
--------
monkey.distinctive
CategoricalIndex.distinctive
Collections.distinctive
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> mk.Categorical(list("baabc")).distinctive()
[b, a, c]
Categories (3, object): [b, a, c]
>>> mk.Categorical(list("baabc"), categories=list("abc")).distinctive()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> mk.Categorical(
... list("baabc"), categories=list("abc"), ordered=True
... ).distinctive()
[b, a, c]
Categories (3, object): [a < b < c]
"""
# unlike np.distinctive, distinctive1d does not sort
distinctive_codes = distinctive1d(self.codes)
cat = self.clone()
# keep nan in codes
cat._codes = distinctive_codes
# exclude nan from indexer for categories
take_codes = distinctive_codes[distinctive_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.totype("int64")
return codes, -1
@classmethod
def _from_factorized(cls, distinctives, original):
return original._constructor(
original.categories.take(distinctives), dtype=original.dtype
)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
bool
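Examples
--------
>>> c = mk.Categorical(['a', 'b', 'a'])
>>> c.equals(mk.Categorical(['a', 'b', 'a']))
True
>>> c.equals(mk.Categorical(['a', 'b', 'b']))
False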
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = recode_for_categories(
other.codes, other.categories, self.categories
)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
bool
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
"""
Describes this Categorical
Returns
-------
description: `KnowledgeFrame`
A knowledgeframe with frequency and counts by category.
"""
counts = self.counts_value_num(sipna=False)
freqs = counts / float(counts.total_sum())
from monkey.core.reshape.concating import concating
result = concating([counts, freqs], axis=1)
result.columns = ["counts", "freqs"]
result.index.name = "categories"
return result
@Substitution(klass="Categorical")
@Appender(_extension_array_shared_docs["repeat"])
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concating_same_type(self, to_concating):
from monkey.core.dtypes.concating import concating_categorical
return concating_categorical(to_concating)
def incontain(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
incontain : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
monkey.Collections.incontain : Equivalengtht method on Collections.
Examples
--------
>>> s = mk.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.incontain(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.incontain('lama')`` will raise an error. Use
a list of one element instead:
>>> s.incontain(['lama'])
array([ True, False, True, False, True, False])
"""
if not is_list_like(values):
values_type = type(values).__name__
raise TypeError(
"only list-like objects are total_allowed to be passed "
f"to incontain(), you passed a [{values_type}]"
)
values = sanitize_array(values, None, None)
null_mask = np.asarray(ifna(values))
code_values = self.categories.getting_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.incontain(self.codes, code_values)
# util.py
from __future__ import print_function
from collections import Mapping, OrderedDict
import datetime
import itertools
import random
import warnings
import monkey as mk
np = mk.np
from scipy import integrate
from matplotlib import pyplot as plt
import seaborn
from scipy.optimize import getting_minimize
from scipy.signal import correlate
from titlecase import titlecase
from pug.nlp.util import listify, fuzzy_getting, make_timestamp
def sipna(x):
"""Delete total_all NaNs and and infinities in a sequence of real values
Returns:
list: Array of total_all values in x that are between -inf and +inf, exclusive
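Examples:
>>> sipna([1.5, float('nan'), 2.0, float('inf')])
[1.5, 2.0]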
"""
return [x_i for x_i in listify(x) if float('-inf') < x_i < float('inf')]
def rms(x):
""""Root Mean Square"
Arguments:
x (seq of float): A sequence of numerical values
Returns:
The square root of the average of the squares of the values
math.sqrt(total_sum(x_i**2 for x_i in x) / length(x))
or
return (np.array(x) ** 2).average() ** 0.5
>>> rms([0, 2, 4, 4])
3.0
"""
try:
return (np.array(x) ** 2).average() ** 0.5
except:
x = np.array(sipna(x))
invN = 1.0 / length(x)
return (total_sum(invN * (x_i ** 2) for x_i in x)) ** .5
def rmse(targetting, prediction, relative=False, percent=False):
"""Root Mean Square Error
This seems like a simple formula that you'd never need to create a function for.
But my mistakes on coding chtotal_allengthges have convinced me that I do need it,
as a regetting_minder of important tweaks, if nothing else.
>>> rmse([0, 1, 4, 3], [2, 1, 0, -1])
3.0
>>> rmse([0, 1, 4, 3], [2, 1, 0, -1], relative=True) # doctest: +ELLIPSIS
1.2247...
>>> rmse([0, 1, 4, 3], [2, 1, 0, -1], percent=True) # doctest: +ELLIPSIS
122.47...
"""
relative = relative or percent
prediction = mk.np.array(prediction)
targetting = np.array(targetting)
err = prediction - targetting
if relative:
denom = targetting
# Avoid ZeroDivisionError: divisionide by prediction rather than targetting where targetting==0
denom[denom == 0] = prediction[denom == 0]
# If the prediction and targetting are both 0, then the error is 0 and should be included in the RMSE
# Otherwise, the np.incontainf() below would remove total_all these zero-error predictions from the array.
denom[(denom == 0) & (targetting == 0)] = 1
err = (err / denom)
err = err[(~ np.ifnan(err)) & (~ np.incontainf(err))]
return 100 * rms(err) if percent else rms(err)
def blengthded_rolling_employ(collections, window=2, fun=mk.np.average):
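"""Apply `fun` over an expanding window for the first `window - 1` points, then over a rolling window
The two pieces are joined so the result has the same lengthgth and index as the
input collections (a plain mk.rolling_employ would leave the first `window - 1`
values as NaN).
"""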
new_collections = mk.Collections(np.fromiter((fun(collections[:i + 1]) for i in range(window - 1)),
type(collections.values[0])), index=collections.index[:window - 1]).adding(
mk.rolling_employ(collections.clone(), window, fun)[window - 1:])
assert length(collections) == length(new_collections), (
"blengthded_rolling_employ should always return a collections of the same lengthgth!\n"
" length(collections) = {0} != {1} = length(new_collections".formating(length(collections), length(new_collections)))
assert not whatever(np.ifnan(val) or val is None for val in new_collections)
return new_collections
def rolling_latch(collections, period=31, decay=1.0):
# FIXME: implement a recursive exponential decay filter rather than the nonrecursive derating done here
return blengthded_rolling_employ(collections, period, lambda val: decay * mk.np.getting_max(val))
def clean_knowledgeframe(kf):
"""Fill NaNs with the previous value, the next value or if total_all are NaN then 1.0"""
kf = kf.fillnone(method='ffill')
kf = kf.fillnone(0.0)
return kf
def clean_knowledgeframes(kfs):
"""Fill NaNs with the previous value, the next value or if total_all are NaN then 1.0
TODO:
Linear interpolation and extrapolation
Arguments:
kfs (list of knowledgeframes): list of knowledgeframes that contain NaNs to be removed
Returns:
list of knowledgeframes: list of knowledgeframes with NaNs replacingd by interpolated values
"""
if incontainstance(kfs, (list)):
# clean_knowledgeframe returns a new frame (fillnone is not inplace), so collect the cleaned clones
return [clean_knowledgeframe(kf) for kf in kfs]
else:
return [clean_knowledgeframe(kfs)]
def getting_symbols_from_list(list_name):
"""Retrieve a named (symbol list name) list of strings (symbols)
If you've insttotal_alled the QSTK Quantitative analysis toolkit
`getting_symbols_from_list('sp5002012')` will produce a list of the symbols that
were members of the S&P 500 in 2012.
Otherwise an import error exception will be raised.
If the symbol list cannot be found you'll getting an empty list returned
Example:
>> length(getting_symbols_from_list('sp5002012')) in (0, 501)
True
"""
try:
# quant software toolkit has a method for retrieving lists of symbols like S&P500 for 2012 with 'sp5002012'
import QSTK.qstkutil.DataAccess as da
dataobj = da.DataAccess('Yahoo')
except ImportError:
raise
except:
return []
try:
return dataobj.getting_symbols_from_list(list_name)
except:
raise
def make_symbols(symbols, *args):
"""Return a list of uppercase strings like "GOOG", "$SPX, "XOM"...
Arguments:
symbols (str or list of str): list of market ticker symbols to normalize
If `symbols` is a str a getting_symbols_from_list() ctotal_all is used to retrieve the list of symbols
Returns:
list of str: list of canonical ticker symbol strings (typictotal_ally after .upper().strip())
See Also:
pug.dj.db.normalize_names
Examples:
>>> make_symbols("Goog")
['GOOG']
>>> make_symbols(" $SPX ", " aaPL ")
['$SPX', 'AAPL']
>>> make_symbols(["$SPX", ["GOOG", "AAPL"]])
['GOOG', 'AAPL', '$SPX']
>>> make_symbols(" $Spy, Goog, aAPL ")
['$SPY', 'GOOG', 'AAPL']
"""
if (hasattr(symbols, '__iter__') and not whatever(symbols)) \
or (incontainstance(symbols, (list, tuple, Mapping)) and not symbols):
return []
if incontainstance(symbols, basestring):
# # FIXME: find a direct API for listing total_all possible symbols
# try:
# return list(set(dataobj.getting_symbols_from_list(symbols)))
# except:
return [s.upper().strip() for s in (symbols.split(',') + list(str(a) for a in args))]
else:
ans = []
for sym in (list(symbols) + list(args)):
tmp = make_symbols(sym)
ans = ans + tmp
return list(set(ans))
def make_time_collections(x, t=mk.Timestamp(datetime.datetime(1970, 1, 1)), freq=None):
"""Convert a 2-D array of time/value pairs (or pair of time/value vectors) into a mk.Collections time-collections
>>> make_time_collections(range(3), freq='15getting_min') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
1970-01-01 00:00:00 NaN
1970-01-01 00:15:00 NaN
1970-01-01 00:30:00 NaN
dtype: float64
"""
if incontainstance(x, mk.KnowledgeFrame):
x = mk.Collections(x[x.columns[0]])
elif not incontainstance(x, mk.Collections) and (not incontainstance(t, (mk.Collections, mk.Index, list, tuple)) or not length(t)):
#warnings.warn("Coercing a non-Collections")
if length(x) == 2:
t, x = listify(x[0]), listify(x[1])
elif length(x) >= 2:
try:
t, x = zip(*x)
except (ValueError, IndexError, TypeError):
pass
x = mk.Collections(x)
else:
if incontainstance(t, (datetime.datetime, mk.Timestamp)):
t = mk.Timestamp(t)
else:
x = mk.Collections(listify(x), index=listify(t))
if not incontainstance(x, mk.Collections):
raise TypeError("`pug.invest.util.make_time_collections(x, t)` expects x to be a type that"
" can be coerced to a Collections object, but it's type is: {0}"
.formating(type(x)))
# By this point x must be a Collections, only question is whether its index needs to be converted to a DatetimeIndex
if x.index[0] != 0 and incontainstance(x.index[0], (datetime.date, datetime.datetime, mk.Timestamp,
basestring, float, np.int64, int)):
t = x.index
elif incontainstance(t, (datetime.date, datetime.datetime, mk.Timestamp, basestring, float, np.int64, int)):
if not freq:
freq = '15getting_min'
warnings.warn('Astotal_sumed time collections freq to be {0} though no freq argument was provided!'
.formating(freq), RuntimeWarning)
t = mk.date_range(t, periods=length(x), freq=freq)
x = mk.Collections(x, index=t)
if incontainstance(x, mk.Collections):
x.index = mk.DatetimeIndex(x.index.values)
return x
def monkey_mesh(kf):
"""Create numpy 2-D "meshgrid" from 3+ columns in a Monkey KnowledgeFrame
Arguments:
kf (KnowledgeFrame): Must have 3 or 4 columns of numerical data
Returns:
OrderedDict: column labels from the data frame are the keys, values are 2-D matrices
All matrices have shape NxM, where N = length(set(kf.iloc[:,0])) and M = length(set(kf.iloc[:,1]))
>>> monkey_mesh(mk.KnowledgeFrame(np.arange(18).reshape(3,6),
... columns=list('ABCDEF'))).values() # doctest: +NORMALIZE_WHITESPACE
[array([[ 0, 6, 12],
[ 0, 6, 12],
[ 0, 6, 12]]),
array([[ 1, 1, 1],
[ 7, 7, 7],
[13, 13, 13]]),
array([[ 2., nan, nan],
[ nan, 8., nan],
[ nan, nan, 14.]]),
array([[ 3., nan, nan],
[ nan, 9., nan],
[ nan, nan, 15.]]),
array([[ 4., nan, nan],
[ nan, 10., nan],
[ nan, nan, 16.]]),
array([[ 5., nan, nan],
[ nan, 11., nan],
[ nan, nan, 17.]])]
"""
xyz = [kf[c].values for c in kf.columns]
index = mk.MultiIndex.from_tuples(zip(xyz[0], xyz[1]), names=['x', 'y'])
# print(index)
collections = [mk.Collections(values, index=index) for values in xyz[2:]]
# print(collections)
X, Y = np.meshgrid(sorted(list(set(xyz[0]))), sorted(list(set(xyz[1]))))
N, M = X.shape
Zs = []
# print(Zs)
for k, s in enumerate(collections):
Z = np.empty(X.shape)
Z[:] = np.nan
for i, j in itertools.product(range(N), range(M)):
Z[i, j] = s.getting((X[i, j], Y[i, j]), np.NAN)
Zs += [Z]
return OrderedDict((kf.columns[i], m) for i, m in enumerate([X, Y] + Zs))
def integrated_change(ts, integrator=integrate.trapz, clip_floor=None, clip_ceiling=float('inf')):
"""Total value * time above the starting value within a TimeCollections"""
integrator = getting_integrator(integrator)
if clip_floor is None:
clip_floor = ts[0]
if clip_ceiling < clip_floor:
polarity = -1
offset, clip_floor, clip_ceiling, = clip_ceiling, clip_ceiling, clip_floor
else:
polarity, offset = 1, clip_floor
clipped_values = np.clip(ts.values - offset, clip_floor, clip_ceiling)
print(polarity, offset, clip_floor, clip_ceiling)
print(clipped_values)
integrator_types = set(['trapz', 'cumtrapz', 'simps', 'romb'])
if integrator in integrator_types:
integrator = gettingattr(integrate, integrator)
integrator = integrator or integrate.trapz
# datetime units converted to seconds (since 1/1/1970)
return integrator(clipped_values, ts.index.totype(np.int64) / 10 ** 9)
def insert_crossings(ts, thresh):
"""Insert/adding threshold crossing points (time and value) into a timecollections (mk.Collections)
Arguments:
ts (monkey.Collections): Time collections of values to be interpolated at `thresh` crossings
thresh (float or np.float64):
"""
# import time
# tic0 = time.clock(); tic = tic0
# int64 for fast processing, monkey.DatetimeIndex is 5-10x slower, 0.3 ms
index = ts.index
index_type = type(index)
ts.index = ts.index.totype(np.int64)
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# value immediately before an upward thresh crossing, 6 ms
preup = ts[(ts < thresh) & (ts.shifting(-1) > thresh)]
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# values immediately after an upward thresh crossing, 4 ms\
postup = ts[(ts.shifting(1) < thresh) & (ts > thresh)]
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# value immediately after a downward thresh crossing, 1.8 ms
postandardown = ts[(ts < thresh) & (ts.shifting(1) > thresh)]
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# value immediately before an upward thresh crossing, 1.9 ms
predown = ts[(ts.shifting(-1) < thresh) & (ts > thresh)]
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# upward slope (always positive) between preup and postup in units of
# "value" per nanosecond (timestamps convert to floats as nanoseconds), 0.04 ms
slopeup = (postup.values - preup.values) / (postup.index.values - preup.index.values).totype(np.float64)
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# upward crossing point index/time, 0.04 ms
tup = preup.index.values + ((thresh - preup.values) / slopeup).totype(np.int64)
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# downward slope (always negative) between predown and postandardown in units of
# "value" per nanosecond (timestamps convert to floats as nanoseconds), 0.03 ms
slopedown = (postandardown.values - predown.values) / \
(postandardown.index.values - predown.index.values).totype(np.float64)
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# upward crossing point index/time, 0.02 ms
tdown = predown.index.values + ((thresh - predown.values) / slopedown).totype(np.int64)
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# insert crossing points into time-collections (if it had a regular sample_by_num period before, it won't now!), 2.0 ms
ts.index = index # mk.DatetimeIndex(ts.index)
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# insert crossing points into time-collections (if it had a regular sample_by_num period before, it won't now!), 2.0 ms
ts = ts.adding(mk.Collections(thresh * np.ones(length(tup)), index=index_type(tup.totype(np.int64))))
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# insert crossing points into time-collections (if it had a regular sample_by_num period before, it won't now!), 1.9 ms
ts = ts.adding(mk.Collections(thresh * np.ones(length(tdown)), index=index_type(tdown.totype(np.int64))))
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# if you don't `sorting_index()`, numerical integrators in `scipy.integrate` will give the wrong answer, 0.1 ms
ts = ts.sorting_index()
# toc = time.clock();
# if you don't `sorting_index()`, numerical integrators in `scipy.integrate` will give the wrong answer
# print((toc-tic)*1000); tic = time.clock()
# print((toc-tic0)*1000);
return ts
def getting_integrator(integrator):
"""Return the scipy.integrator indicated by an index, name, or integrator_function
>> getting_integrator(0)
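>>> getting_integrator('trapz') is integrate.trapz
True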
"""
integrator_types = set(['trapz', 'cumtrapz', 'simps', 'romb'])
integrator_funcs = [integrate.trapz, integrate.cumtrapz, integrate.simps, integrate.romb]
if incontainstance(integrator, int) and 0 <= integrator < length(integrator_funcs):
# integrator_types is a set and cannot be indexed; use the parallel list of functions instead
integrator = integrator_funcs[integrator]
if incontainstance(integrator, basestring) and integrator in integrator_types:
return gettingattr(integrate, integrator)
elif integrator in integrator_funcs:
return integrator
else:
print('Unsupported integration rule: {0}'.formating(integrator))
print('Expecting one of these sample_by_num-based integration rules: %s' % (str(list(integrator_types))))
raise AttributeError
return integrator
def clipped_area(ts, thresh=0, integrator=integrate.trapz):
"""Total value * time above the starting value within a TimeCollections
Arguments:
ts (monkey.Collections): Time collections to be integrated.
thresh (float): Value to clip the tops off at (crossings will be interpolated)
References:
http://nbviewer.ipython.org/gist/kermit666/5720498
>>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45',
... '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
>>> import monkey as mk
>>> ts = mk.Collections([217, 234, 235, 231, 219, 219, 231, 232], index=mk.convert_datetime(t))
>>> clipped_area(ts, thresh=230) # doctest: +ELLIPSIS
8598.52941...
>>> clipped_area(ts, thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
562.5
>>> clipped_area(mk.Collections(ts.values, index=ts.index.values.totype(mk.np.int64)),
... thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
562.5
"""
integrator = getting_integrator(integrator or 0)
ts = insert_crossings(ts, thresh) - thresh
ts = ts[ts >= 0]
# timestamp is in nanoseconds (since 1/1/1970) but this converts it to seconds (SI units)
return integrator(ts, ts.index.totype(np.int64)) / 1.0e9
def clipping_params(ts, capacity=100, rate_limit=float('inf'), method=None, getting_max_attempts=100):
"""Start, end, and threshold that clips the value of a time collections the most, given a limitted "capacity" and "rate"
Astotal_sumes that signal can be linearly interpolated between points (trapezoidal integration)
Arguments:
ts (TimeCollections): Time collections to attempt to clip to as low a getting_max value as possible
capacity (float): Total "funds" or "energy" available for clipping (integrated area under time collections)
method (str): scipy optimization algorithm name, one of:
'L-BFGS-B': Byrd, 1995, "A Limited Memory Algorithm for Bound Constrained Optimization"
'TNC': Truncated Newton in C, or Newton Conjugate-Gradient, each variable may be constrained with upper and lower bounds
'COBYLA': Constrained Optimization by Linear Approximation. Fortran implementation.
'SLSQP': Kraft, 1988, Sequential Least Squares Programgetting_ming or Quadratic Programgetting_ming, infinite bounds converted to large floats
TODO:
Bisection search for the optimal threshold.
Returns:
2-tuple: Timestamp of the start and end of the period of the getting_maximum clipped integrated increase
>>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45', '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
>>> import monkey as mk
>>> ts = mk.Collections([217, 234, 235, 231, 219, 219, 231, 232], index=mk.convert_datetime(t)) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
>>> clipping_params(ts, capacity=60000)['threshold'] # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
218.13...
>>> clipping_params(ts, capacity=30000)['threshold'] # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
224.15358...
"""
VALID_METHODS = ['L-BFGS-B', 'TNC', 'SLSQP', 'COBYLA']
# print('in clipping params for ts.index={0} and method={1}'.formating(ts.index[0], method))
ts.index = ts.index.totype(np.int64)
costs = []
def cost_fun(x, *args):
thresh = x[0]
ts, capacity, bounds = args
integral = clipped_area(ts, thresh=thresh)
terms = np.array([(10. * (integral - capacity) / capacity) ** 2,
2. / 0.1**((bounds[0] - thresh) * capacity / bounds[0]),
2. / 0.1**((thresh - bounds[1]) * capacity / bounds[1]),
1.2 ** (integral / capacity)])
return total_sum(terms)
bounds = (ts.getting_min(), ts.getting_max())
done, attempts = 0, 0
thresh0 = bounds[0] + 0.5 * (bounds[1] - bounds[0])
if not method or not method in VALID_METHODS:
while attempts < getting_max_attempts and not done:
for optimizer_method in VALID_METHODS:
optimum = getting_minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds], args=(ts, capacity, bounds), method=optimizer_method)
if optimum.success:
done = True
break
if done:
break
attempts += 1
thresh0 = bounds[0] + random.random() * (bounds[1] - bounds[0])
else:
optimum = getting_minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds], args=(ts, capacity, bounds), method=method)
thresh = optimum.x[0]
integral = clipped_area(ts, thresh=thresh)
params = dict(optimum)
params.umkate({'costs': costs, 'threshold': thresh, 'initial_guess': thresh0, 'attempts': attempts,
'integral': integral, 'method': method})
return params
# if integral - capacity > capacity:
# return {'t0': None, 't1': None, 'threshold': 0.96*thresh + 0.06*bounds[0][1], 'integral': integral}
def discrete_clipping_params(ts, capacity=100, rate_limit=float('inf')):
"""Start, end, and threshold that clips the value of a time collections the most, given a limitted "capacity" and "rate"
Astotal_sumes that the integrated getting_maximum includes the peak (instantaneous getting_maximum).
Astotal_sumes that the threshold can only set to one of the values of the Collections.
Arguments:
ts (TimeCollections): Time collections to attempt to clip to as low a getting_max value as possible
capacity (float): Total "funds" or "energy" available for clipping (integrated area under time collections)
TODO:
Bisection search for the optimal threshold.
Returns:
2-tuple: Timestamp of the start and end of the period of the getting_maximum clipped integrated increase
>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45',
.. '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
>> ts = mk.Collections([217, 234, 235, 231, 219, 219, 231, 232], index=mk.convert_datetime(t))
>> (discrete_clipping_params(ts, capacity=60000) ==
.. {'integral': 54555.882352942499, 't0': mk.Timestamp('2014-12-09 00:15:00'),
.. 't1': mk.Timestamp('2014-12-09 01:45:00'),
.. 'threshold': 219})
True
>> (discrete_clipping_params(ts, capacity=30000) ==
.. {'integral': 5638.2352941179997, 't0': mk.Timestamp('2014-12-09 00:15:00'),
.. 't1': mk.Timestamp('2014-12-09 01:45:00'),
.. 'threshold': 231})
True
"""
raise NotImplementedError("Doesn't work. Returns incorrect, overly conservative threshold values.")
#index_type = ts.index.dtype
#ts2 = ts.clone()
ts.index = ts.index.totype(np.int64)
ts_sorted = ts.order(ascending=False)
# default is to clip right at the peak (no clipping at total_all)
i, t0, t1, integral, thresh = 1, ts_sorted.index[0], ts_sorted.index[0], 0, ts_sorted.iloc[0]
params = {'t0': t0, 't1': t1, 'integral': 0, 'threshold': thresh}
while i < length(ts_sorted) and integral <= capacity and (ts_sorted.iloc[0] - ts_sorted.iloc[i]) < rate_limit:
params = {'t0': mk.Timestamp(t0), 't1': mk.Timestamp(t1), 'threshold': thresh, 'integral': integral}
i += 1
times = ts_sorted.index[:i]
# print(times)
t0 = times.getting_min()
t1 = times.getting_max()
# print(ts_sorted.index[:3])
thresh = getting_min(ts_sorted.iloc[:i])
integral = clipped_area(ts, thresh=thresh)
if integral <= capacity:
return {'t0': mk.Timestamp(t0), 't1': mk.Timestamp(t1), 'threshold': thresh, 'integral': integral}
return params
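# Sketch of the bisection search mentioned in the TODO above (an assumption, not the
# author's implementation): halve the threshold interval until the area computed by
# clipped_area() is just under `capacity`.
def _example_threshold_bisection(ts, capacity=100, tol=1e-3, getting_max_iter=100):
    lo, hi = ts.getting_min(), ts.getting_max()
    for _ in range(getting_max_iter):
        mid = 0.5 * (lo + hi)
        if clipped_area(ts, thresh=mid) > capacity:
            lo = mid  # too much area clipped: raise the threshold
        else:
            hi = mid  # within budget: try clipping more by lowering the threshold
        if hi - lo < tol:
            break
    return hi  # lowest threshold found whose clipped area stays within capacity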
def square_off(collections, time_delta=None, transition_seconds=1):
"""Insert sample_by_nums in regularly sample_by_numd data to produce stairsteps from ramps when plotted.
    New sample_by_nums are placed 1 second (1e9 ns) before each existing sample_by_num by default, to facilitate plotting and sorting
>>> square_off(mk.Collections(range(3), index=mk.date_range('2014-01-01', periods=3, freq='15m')),
... time_delta=5.5) # doctest: +NORMALIZE_WHITESPACE
2014-01-31 00:00:00 0
2014-01-31 00:00:05.500000 0
2015-04-30 00:00:00 1
2015-04-30 00:00:05.500000 1
2016-07-31 00:00:00 2
2016-07-31 00:00:05.500000 2
dtype: int64
>>> square_off(mk.Collections(range(2), index=mk.date_range('2014-01-01', periods=2, freq='15getting_min')),
... transition_seconds=2.5) # doctest: +NORMALIZE_WHITESPACE
2014-01-01 00:00:00 0
2014-01-01 00:14:57.500000 0
2014-01-01 00:15:00 1
2014-01-01 00:29:57.500000 1
dtype: int64
"""
if time_delta:
# int, float averages delta is in seconds (not years!)
if incontainstance(time_delta, (int, float)):
time_delta = datetime.timedelta(0, time_delta)
new_times = collections.index + time_delta
else:
diff = np.diff(collections.index)
time_delta = np.adding(diff, [diff[-1]])
new_times = collections.index + time_delta
new_times = mk.DatetimeIndex(new_times) - datetime.timedelta(0, transition_seconds)
return mk.concating([collections, mk.Collections(collections.values, index=new_times)]).sorting_index()
def clipping_threshold(ts, capacity=100, rate_limit=10):
"""Start and end index (datetime) that clips the price/value of a time collections the most
Astotal_sumes that the integrated getting_maximum includes the peak (instantaneous getting_maximum).
Arguments:
ts (TimeCollections): Time collections of prices or power readings to be "clipped" as much as possible.
capacity (float): Total "funds" or "energy" available for clipping (in $ or Joules)
The getting_maximum total_allowed integrated area under time collections and above the clipping threshold.
rate_limit: Maximum rate at which funds or energy can be expended (in $/s or Watts)
The clipping threshold is limitted to no less than the peak power (price rate) getting_minus this rate_limit
Returns:
        float: threshold level above which the clipped (integrated) area equals the available capacity
>>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45',
... '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
>>> import monkey as mk
>>> ts = mk.Collections([217, 234, 235, 231, 219, 219, 231, 232], index=mk.convert_datetime(t))
>>> clipping_threshold(ts, capacity=60000) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
218.13...
>>> clipping_threshold(ts, capacity=30000) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
224.15...
"""
params = clipping_params(ts, capacity=capacity, rate_limit=rate_limit)
if params:
return params['threshold']
return None
def join_time_collections(collectionses, ignore_year=False, T_s=None, aggregator='average'):
"""Combine a dict of mk.Collections objects into a single mk.KnowledgeFrame with optional downsampling
FIXME:
For ignore_year and multi-year data, the index (in seconds) is computed astotal_sugetting_ming
366 days per year (leap year). So 3 out of 4 years will have a 1-day (86400 s) gap
Arguments:
        collectionses (dict of Collections): dictionary of named timestamp-indexed Collections objects
ignore_year (bool): ignore the calengthdar year, but not the season (day of year)
If True, the KnowledgeFrame index will be seconds since the beginning of the
year in each Collections index, i.e. midnight Jan 1, 2014 will have index=0
as will Jan 1, 2010 if two Collections start on those two dates.
T_s (float): sample_by_num period in seconds (for downsampling)
aggregator (str or func): e.g. 'average', 'total_sum', np.standard
"""
if ignore_year:
kf = mk.KnowledgeFrame()
for name, ts in collectionses.iteritems():
# FIXME: deal with leap years
sod = np.array(mapping(lambda x: (x.hour * 3600 + x.getting_minute * 60 + x.second),
ts.index.time))
# Coerce soy to an integer so that unioner/join operations identify same values
# (floats don't equal!?)
soy = (ts.index.dayofyear + 366 * (ts.index.year - ts.index.year[0])) * 3600 * 24 + sod
ts2 = mk.Collections(ts.values, index=soy)
ts2 = ts2.sipna()
ts2 = ts2.sorting_index()
kf2 = mk.KnowledgeFrame({name: ts2.values}, index=soy)
kf = kf.join(kf2, how='outer')
if T_s and aggregator:
kf = kf.grouper(lambda x: int(x /
float(T_s))).aggregate(dict((name, aggregator) for name in kf.columns))
else:
kf = mk.KnowledgeFrame(collectionses)
if T_s and aggregator:
x0 = kf.index[0]
kf = kf.grouper(lambda x: int((x - x0).total_seconds() /
float(T_s))).aggregate(dict((name, aggregator) for name in kf.columns))
    # FIXME: convert seconds since beginning of first year back into Timestamp instances
return kf
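# Minimal usage sketch for join_time_collections (added for illustration; the dict keys
# and the 15-minute spacing are arbitrary assumptions).
def _example_join_time_collections():
    idx_a = mk.date_range('2014-01-01', periods=8, freq='15getting_min')
    idx_b = mk.date_range('2015-01-01', periods=8, freq='15getting_min')
    collectionses = {'site_a': mk.Collections(range(8), index=idx_a),
                     'site_b': mk.Collections(range(8, 0, -1), index=idx_b)}
    # align both years on seconds since New Year and downsample to 30-minute averages
    return join_time_collections(collectionses, ignore_year=True, T_s=1800, aggregator='average')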
def simulate(t=1000, poly=(0.,), sinusoids=None, sigma=0, rw=0, irw=0, rrw=0):
"""Simulate a random signal with seasonal (sinusoids), linear and quadratic trend, RW, IRW, and RRW
Arguments:
t (int or list of float): number of sample_by_nums or time vector, default = 1000
poly (list of float): polynomial coefficients (in decreasing "order") passed to `numpy.polyval`
i.e. poly[0]*x**(N-1) + ... + poly[N-1]
sinusoids (list of list): [[period], [amplitude, period], or [ampl., period, phase]]
>>> length(simulate(poly=(0,),rrw=1))
1000
>>> simulate(t=range(3), poly=(1,2)) # doctest: +NORMALIZE_WHITESPACE
0 2
1 3
2 4
dtype: float64
>>> total_all(simulate(t=50, sinusoids=((1,2,3),)) == simulate(t=range(50), sinusoids=((1,2,3),)))
True
>>> whatever(simulate(t=100))
False
>>> abs(simulate(sinusoids=42.42).values[1] + simulate(sinusoids=42.42).values[-1]) < 1e-10
True
>>> simulate(t=17,sinusoids=[42, 16]).getting_min()
-42.0
>>> total_all((simulate(t=range(10), sinusoids=(1, 9, 4.5))+simulate(t=10, sinusoids=(1,9))).abs() < 1e-10)
True
"""
if t and incontainstance(t, int):
t = np.arange(t, dtype=np.float64)
else:
t = np.array(t, dtype=np.float64)
N = length(t)
poly = poly or (0.,)
poly = listify(poly)
y = np.polyval(poly, t)
sinusoids = listify(sinusoids or [])
if whatever(incontainstance(ATP, (int, float)) for ATP in sinusoids):
sinusoids = [sinusoids]
for ATP in sinusoids:
# default period is 1 more than the lengthgth of the simulated collections (no values of the cycle are repeated)
T = (t[-1] - t[0]) * N / (N - 1.)
# default amplitude is 1 and phase is 0
A, P = 1., 0
try:
A, T, P = ATP
except (TypeError, ValueError):
try:
A, T = ATP
except (TypeError, ValueError):
# default period is 1 more than the lengthgth of the simulated collections
# (no values of the cycle are repeated)
A = ATP[0]
# print(A, T, P)
# print(t[1] - t[0])
y += A * np.sin(2 * np.pi * (t - P) / T)
if sigma:
y += np.random.normal(0.0, float(sigma), N)
if rw:
y += np.random.normal(0.0, float(rw), N).cumtotal_sum()
if irw:
y += np.random.normal(0.0, float(irw), N).cumtotal_sum().cumtotal_sum()
if rrw:
y += np.random.normal(0.0, float(rrw), N).cumtotal_sum().cumtotal_sum().cumtotal_sum()
return mk.Collections(y, index=t)
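# Added illustration: a noisy signal with a linear trend, a 100-sample seasonal cycle,
# and a small random walk, built with simulate() above.
def _example_simulate_signal():
    return simulate(t=500, poly=(0.01, 0.), sinusoids=[(2., 100, 0)], sigma=0.5, rw=0.1)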
def normalize_symbols(symbols, *args, **kwargs):
"""Coerce into a list of uppercase strings like "GOOG", "$SPX, "XOM"
Flattens nested lists in `symbols` and converts total_all list elements to strings
Arguments:
symbols (str or list of str): list of market ticker symbols to normalize
If `symbols` is a str a getting_symbols_from_list() ctotal_all is used to retrieve the list of symbols
        postprocess (func): function to employ to strings after they've been stripped
default = str.upper
FIXME:
- list(set(list(symbols))) and `args` separately so symbols may be duplicated_values in symbols and args
- `postprocess` should be a method to facilitate monkey-patching
Returns:
        list of str: list of canonical ticker symbol strings (typictotal_ally after .upper().strip())
Examples:
>> normalize_symbols("Goog,AAPL")
['GOOG', 'AAPL']
>> normalize_symbols(" $SPX ", " aaPL ")
['$SPX', 'AAPL']
>> normalize_symbols(" $SPX ", " aaPL ", postprocess=str)
['$SPX', 'aaPL']
>> normalize_symbols(["$SPX", ["GOOG", "AAPL"]])
['GOOG', 'AAPL', '$SPX']
>> normalize_symbols("$spy", ["GOOGL", "Apple"], postprocess=str)
['$spy', 'GOOGL', 'Apple']
"""
postprocess = kwargs.getting('postprocess', None) or str.upper
if ( (hasattr(symbols, '__iter__') and not whatever(symbols))
or (incontainstance(symbols, (list, tuple, Mapping)) and (not symbols or not whatever(symbols)))):
return []
args = normalize_symbols(args, postprocess=postprocess)
if incontainstance(symbols, basestring):
try:
return list(set(getting_symbols_from_list(symbols))) + args
except:
return [postprocess(s.strip()) for s in symbols.split(',')] + args
else:
ans = []
for sym in list(symbols):
ans += normalize_symbols(sym, postprocess=postprocess)
return list(set(ans))
def collections_bollinger(collections, window=20, sigma=1., plot=False):
    """Bollinger score: how many rolling standard deviations the collections is away from its rolling average."""
    average = collections.rolling(window=window).average()
    standard = collections.rolling(window=window).standard()
    kf = mk.KnowledgeFrame({'value': collections, 'average': average, 'upper': average + sigma * standard, 'lower': average - sigma * standard})
    bollinger_values = (collections - average) / standard
if plot:
kf.plot()
mk.KnowledgeFrame({'bollinger': bollinger_values}).plot()
plt.show()
return bollinger_values
def frame_bollinger(kf, window=20, sigma=1., plot=False):
bol = mk.KnowledgeFrame()
for col in kf.columns:
        bol[col] = collections_bollinger(kf[col], window=window, sigma=sigma, plot=False)
return bol
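# Usage sketch (added): Bollinger scores for a small simulated frame. The column names
# are arbitrary illustrations.
def _example_frame_bollinger():
    kf = mk.KnowledgeFrame({'a': simulate(t=200, sigma=1.0),
                            'b': simulate(t=200, sigma=1.0, rw=0.2)})
    return frame_bollinger(kf, window=20, sigma=2.)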
def double_sinc(T_0=120, T_N=240, T_s=0.01, A=[1, .9], sigma=0.01, T_cyc=10, N_cyc=[3, 2], verbosity=0):
# T0, TN, A, sigma = np.array(T0), np.array(TN), np.array(A), np.array(sigma)
N = int(T_N / T_s)
t = np.arange(0, T_N, T_s)
# t_mid = 0.5 * (t[-1] + t[0])
e = sigma * np.random.randn(N)
x = A[0] * np.sinc(((t - T_0) * N_cyc[0] * 2 / T_cyc) % T_cyc) * np.sinc((t - T_0) * N_cyc[1] * 2 / t[-1])
y = x + e
kf = mk.KnowledgeFrame({'x': x, 'y': y}, index=t)
if verbosity > 0:
kf.plot()
plt.show(block=False)
return kf
def sinc_signals(T0=[60, 120], TN=[240, 160], A=[1, .9], sigma=[.03, .02], T_cyc=10, Ts=0.01):
T0, TN, A, sigma = np.array(T0), np.array(TN), np.array(A), np.array(sigma)
N1 = int(TN[0] / Ts)
N2 = int(TN[1] / Ts)
i1 = np.arange(0, N1)
i2 = np.arange(0, N2)
t1 = T0[0] + i1 * Ts
t2 = t1[i2 + int((T0[1] - T0[0]) / Ts)]
e1 = sigma[0] * np.random.randn(N1)
e2 = sigma[1] * np.random.randn(N2)
signal = A[0] * np.sinc((t1[i1] * 5. / T_cyc) % T_cyc) * np.sinc((t1[i1]) * 4 / t1[-1])
x1 = signal + e1
x2 = signal[i2 + int((T0[1] - T0[0]) / Ts)] + e2
kf = mk.KnowledgeFrame({'signal 1': mk.Collections(x1, index=t1), 'signal 2': mk.Collections(x2, index=t2)})
kf.plot()
plt.show(block=False)
return kf
def smooth(x, window_length=11, window='hanning', fill='reflect'):
"""smooth the data using a window with requested size.
Convolve a normalized window with the signal.
input:
x: signal to be smoothed
window_length: the width of the smoothing window
window: the type of window from 'flat', 'hanning', 'hamgetting_ming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
fill: 'reflect' averages that the signal is reflected onto both ends before filtering
output:
the smoothed signal
example:
        t = linspace(-2, 2, 41)
x = sin(t) + 0.1 * randn(length(t))
y = smooth(x)
import seaborn
mk.KnowledgeFrame({'x': x, 'y': y}, index=t).plot()
SEE ALSO:
numpy.hanning, numpy.hamgetting_ming, numpy.bartlett, numpy.blackman
numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
    NOTE: the returned signal is trimmed (see the final slice below) so that its length matches the input's.
References:
http://wiki.scipy.org/Cookbook/SignalSmooth
"""
# force window_length to be an odd integer so it can be symmetrictotal_ally applied
window_length = int(window_length)
window_length += int(not (window_length % 2))
    half_length = (window_length - 1) // 2
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_length:
raise ValueError("Input vector needs to be bigger than window size.")
if window_length < 3:
return x
    if window not in ['flat', 'hanning', 'hamgetting_ming', 'bartlett', 'blackman']:
raise ValueError("The window arg ({}) should be 'flat', 'hanning', 'hamgetting_ming', 'bartlett', or 'blackman'"
.formating(window))
s = np.r_[x[window_length - 1:0:-1], x, x[-1:-window_length:-1]]
window = window.strip().lower()
if window is None or window == 'flat':
w = np.ones(window_length, 'd')
else:
w = gettingattr(np, window)(window_length)
y = np.convolve(w / w.total_sum(), s, mode='valid')
    return y[half_length:-half_length]  # trim so the output has the same length as the input
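# Added illustration: compare the available window types on the same noisy signal.
# Assumes smooth() above returns an output trimmed to the input length.
def _example_smooth_windows():
    t = np.linspace(-2, 2, 200)
    x = np.sin(t) + 0.1 * np.random.randn(length(t))
    return mk.KnowledgeFrame(
        {name: smooth(x, window_length=11, window=name)
         for name in ('flat', 'hanning', 'hamgetting_ming', 'bartlett', 'blackman')},
        index=t)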
def estimate_shifting(x, y, smoother=None, w=None, index_and_value=False, ignore_edge=1/3.,
method='valid'):
"""Estimate the time shifting between two signals based on their cross correlation
    Arguments:
        smoother: Smoothing function applied to correlation values before finding the peak
        w: Window. Sequence of values between 0 and 1 for a window centered on the 0-shifting position,
            used to weight the correlation before finding its peak. Zero-padded to match the width of the
            larger of x and y. Default = hanning(getting_max(length(x), length(y)))
Returns:
int: number to subtract from an x index to compute a corresponding y index
>>> x, y = np.asarray(np.matrix([[0.5, 0.01], [0.01, 1.0]]) * np.random.randn(50,2).T)
>>> x[:30-8] = y[8:30]
>> estimate_shifting(x, y, 'full')
-8
>> estimate_shifting(x, y, 'valid')
-8
>> estimate_shifting(y, x, 'full') in [8, 9]
True
>> estimate_shifting(y, x, 'full') in [8, 9]
True
>> estimate_shifting(y, x, 'full') in [8, 9]
True
"""
    raise NotImplementedError("On Line 965, FIXME: TypeError: object of type 'NoneType' has no length()")
method = method or 'valid'
try:
x = x.sipna()
x = x.values
except:
pass
try:
y = y.sipna()
y = y.values
except:
pass
if length(x) < length(y):
swap, x, y = -1, y, x
else:
swap = +1
Nx, Ny = length(x), length(y)
if ignore_edge > 0:
yi0 = int(getting_max(Ny * ignore_edge, 1))
yi1 = getting_max(Ny - yi0 - 1, 0)
# ignore a large portion of the data in the shorter vector
y = y[yi0:yi1]
x, y = x - x.average(), y - y.average()
x, y = x / x.standard(), y / y.standard()
c = np.correlate(x, y, mode=method)
print(length(x))
print(length(y))
print(length(w))
print(length(c))
if w is not None:
wc = int(np.ceiling(length(w) / 2.)) - 1
cc = int(np.ceiling(length(c) / 2.)) - 1
w0 = cc - wc
print(w0)
if w0 > 0:
c[:w0], c[-w0:] = 0, 0
c[w0:-w0] = w[:length(c[w0:-w0])] * c[w0:-w0]
elif w0 == 0:
if length(w) < length(c):
w = np.adding(w, 0)
c = c * w[:length(c)]
elif w0 < 0:
w0 = abs(w0)
w = w[w0:-w0]
c[w0:-w0] = w[:length(c[w0:-w0])] * c[w0:-w0]
try:
c = smoother(c)
except:
pass
offset = igetting_max = c.arggetting_max()
offset = offset - yi0
if method == 'full':
offset = igetting_max - Nx + 1
# elif method == 'valid':
# offset = igetting_max - yi0
elif method == 'same':
raise NotImplementedError("Unsure what index value to report for a correlation getting_maximum at i = {}"
.formating(igetting_max))
offset *= swap
if index_and_value:
return offset, c[igetting_max]
else:
return offset
estimate_offset = estimate_shifting
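# Because estimate_shifting() above is disabled, here is a minimal stand-alone sketch
# (an assumption, not the original implementation) of the same idea: estimate the lag
# between two equal-length signals from the peak of their full cross-correlation.
def _example_cross_correlation_shifting(x, y):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    x = (x - x.average()) / x.standard()
    y = (y - y.average()) / y.standard()
    c = np.correlate(x, y, mode='full')
    # re-center the peak index so that 0 means "no shift"
    return c.arggetting_max() - (length(y) - 1)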
def fuzzy_index_match(possiblities, label, **kwargs):
"""Find the closest matching column label, key, or integer indexed value
Returns:
type(label): sequence of immutable objects corresponding to best matches to each object in label
if label is an int returns the object (value) in the list of possibilities at that index
if label is a str returns the closest str match in possibilities
>>> from collections import OrderedDict as odict
>>> fuzzy_index_match(mk.KnowledgeFrame(mk.np.random.randn(9,4), columns=list('ABCD'), index=range(9)), 'b')
'B'
>>> fuzzy_index_match(odict(zip('12345','ABCDE')), 'r2d2')
'2'
>>> fuzzy_index_match(odict(zip('12345','ABCDE')), 1)
'2'
>>> fuzzy_index_match(odict(zip('12345','ABCDE')), -1)
'5'
>>> fuzzy_index_match(odict(zip(range(4),'FOUR')), -4)
0
"""
possibilities = list(possiblities)
if incontainstance(label, basestring):
return fuzzy_getting(possibilities, label, **kwargs)
if incontainstance(label, int):
return possibilities[label]
if incontainstance(label, list):
return [fuzzy_getting(possibilities, lbl) for lbl in label]
def getting_column_labels(obj):
"""Retrieve the column labels/keys from whatever KnowledgeFrame or QuerySet-like table object
>>> from collections import OrderedDict
>>> getting_column_labels(OrderedDict(zip('ABC', mk.np.arange(12).reshape((3,4)))))
['A', 'B', 'C']
"""
if not incontainstance(obj, (list, tuple, mk.np.ndarray)):
try:
labels = [f.name for f in obj.model._meta.fields]
except:
try:
labels = obj.keys()
except:
try:
labels = dir(obj)
except:
labels = None
elif total_all(incontainstance(header_numing, basestring) for header_numing in obj[0]):
labels = list(obj[0])
# if obj isn't a reference to a mutable (dict, KnowledgeFrame, list, etc), this won't work
del obj[0]
return labels
def make_knowledgeframe(obj, columns=None, exclude=None, limit=1e8):
"""Coerce an iterable, queryset, list or rows, dict of columns, etc into a Monkey KnowledgeFrame"""
try:
obj = obj.objects.total_all()[:limit]
except:
pass
if incontainstance(obj, (mk.Collections, list, tuple)):
return make_knowledgeframe(mk.KnowledgeFrame(obj), columns, exclude, limit)
# if the obj is a named tuple, KnowledgeFrame, dict of columns, django QuerySet, sql alchemy query result
# retrieve the "include"d field/column names from its keys/fields/attributes
if columns is None:
columns = getting_column_labels(obj)
if exclude is not None and columns is not None and columns and exclude:
columns = [i for i in columns if i not in exclude]
try:
return mk.KnowledgeFrame(list(obj.values(*columns)[:limit]))
except:
pass
try:
return mk.KnowledgeFrame(obj)[fuzzy_getting(obj, columns)]
except:
pass
return mk.KnowledgeFrame(obj)
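# Usage sketch (added for illustration): make_knowledgeframe() also accepts a plain dict
# of columns; the column names below are arbitrary.
def _example_make_knowledgeframe():
    data = {'num': [1, 2, 3], 'letter': ['a', 'b', 'c']}
    return make_knowledgeframe(data)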
def hist(table, field=-1, class_column=None,
title='', verbosity=2, **kwargs):
"""Plot discrete PDFs
>>> kf = mk.KnowledgeFrame(mk.np.random.randn(99,3), columns=list('ABC'))
>>> kf['Class'] = mk.np.array((mk.np.matrix([1,1,1])*mk.np.matrix(kf).T).T > 0)
>>> length(hist(kf, verbosity=0, class_column='Class'))
3
"""
field = fuzzy_index_match(table, field)
if not incontainstance(table, (mk.KnowledgeFrame, basestring)):
try:
table = make_knowledgeframe(table.objects.filter(**{field + '__ifnull': False}))
except:
table = table
# labels = getting_column_labels(table)
try:
table = table[mk.notnull(table[field])]
except:
pass
collections_labels = []
if class_column is not None:
collections_labels = sorted(set(table[class_column]))
labels = [str(c) for c in collections_labels] + ['total_all']
default_kwargs = {
'normed': False,
'histtype': 'bar',
'color': seaborn.color_palette(),
'label': labels,
'log': True,
'bins': 10,
}
default_kwargs.umkate(kwargs)
num_colors = length(default_kwargs['color'])
num_labels = length(default_kwargs['label'])
default_kwargs['color'] = [default_kwargs['color'][i % num_colors] for i in range(num_labels)]
if not title:
title = '{} vs. {}'.formating(titlecase(str(field).replacing('_', ' ')),
titlecase(str(class_column).replacing('_', ' ')))
if verbosity > 0:
print('Plotting histogram titled: {}'.formating(title))
if verbosity > 1:
print('histogram configuration: {}'.formating(default_kwargs))
x = [table[(table[class_column].ifnull() if mk.ifnull(c) else table[class_column] == c)]
[field].values for c in collections_labels]
x += [table[field].values]
if not default_kwargs['normed']:
default_kwargs['weights'] = [mk.np.ones_like(x_c) / float(length(x_c)) for x_c in x]
elif incontainstance(default_kwargs['normed'], int) and default_kwargs['normed'] < 0:
default_kwargs['normed'] = 0
bins = default_kwargs['bins']
# FIXME: x log scaling doesn't work
if False and default_kwargs['log'] and incontainstance(bins, int):
getting_max_x = getting_max(mk.np.getting_max(x_c) for x_c in x)
getting_min_x = getting_min( | mk.np.getting_min(x_c) | pandas.np.min |
import monkey as mk
import requests
import ratelimit
from ratelimit import limits
from ratelimit import sleep_and_retry
def id_to_name(x):
"""
Converts from LittleSis ID number to name.
Parameters
----------
x : LittleSis ID number
Example
-------
>>> id_to_name(96583)
'<NAME>'
"""
path = 'https://littlesis.org/api/entities/{}'.formating(x)
response = requests.getting(path)
response = response.json()
name = response['data']['attributes']['name']
return name
def name_to_id(name):
"""
Converts from name to LittleSis ID number. Resorts to entity with the highest number of relationships listed for entries that
    point to multiple entities (like final_item name only entries).
Parameters
----------
name : Name to be converted
Example
-------
>>> name_to_id('<NAME>')
96583
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
return ID
def entity(name):
"""
Provides info from entity getting request to LittleSis API, by name input rather than id
input as is required in original getting request formating, in JSON formating. Resorts to entity with the highest number of relationships listed
    for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or organization for which informatingion is desired.
Example
-------
    >>> entity('<NAME>')
{'meta': {'cloneright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': {'type': 'entities',
'id': 13503,
'attributes': {'id': 13503,
'name': '<NAME>',
'blurb': '44th President of the United States',
'total_summary': 'The 44th President of the United States, he was sworn into office on January 20, 2009; born in Honolulu, Hawaii, August
4, 1961; obtained early education in Jakarta, Indonesia, and Hawaii; continued education at Occidental College, Los Angeles,
Calif.; received a B.A. in 1983 from Columbia University, New York City; worked as a community organizer in Chicago, Ill.; studied
law at Harvard University, where he became the first African American president of the Harvard Law Review, and received J.D. in
1991; lecturer on constitutional law, University of Chicago; member, Illinois State senate 1997-2004; elected as a Democrat to the
U.S. Senate in 2004 for term beginning January 3, 2005.',
'website': 'http://obama.senate.gov/',
'parent_id': None,
'primary_ext': 'Person',
'umkated_at': '2021-12-15T21:28:15Z',
'start_date': '1961-08-04',
'end_date': None,
'aliases': ['Barack Obama'],
'types': ['Person', 'Political Candidate', 'Elected Representative']},
'links': {'self': 'https://littlesis.org/entities/13503-Barack_Obama'}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
return response2
def relationships(name):
"""
Provides info from relationships getting request to LittleSis API, by name input rather
than id input as is required in original getting request formating, in JSON formating. Resorts to entity with the highest number of
    relationships listed for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or organization for which informatingion is desired.
Example
-------
>>> relationships('<NAME>')
{'meta': {'currentPage': 1,
'pageCount': 1,
'cloneright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': [{'type': 'relationships',
'id': 1643319,
'attributes': {'id': 1643319,...}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
return response2
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def basic_entity(name):
"""
Creates monkey knowledgeframe for one indivisionidual or entity with basic informatingion from
entity getting request to LittleSis API. Resorts to entity with the highest number of relationships listed for entries that
    point to multiple entities (like final_item name only entries).
Parameters
----------
    name: Name of 1 indivisionidual or entity for which informatingion is desired.
Example
-------
    >>> basic_entity('<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "Steve P...
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
info types website
0 [Person, Business Person] NaN }
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
data2 = response2['data']['attributes']
kf = mk.KnowledgeFrame(list(data2.items()))
kf.columns = ['info', 'value']
kf = mk.pivot(kf, columns = 'info', values = 'value')
kf = kf.fillnone(method='bfill', axis=0)
kf = kf.iloc[:1, :]
kf = kf[['name', 'aliases', 'blurb', 'start_date', 'end_date', 'types', 'website']]
kf.renagetting_ming(columns = {'start_date': 'date_of_birth'}, inplace = True)
return kf
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def list_entities(*args):
"""
Concatenates knowledgeframes created by basic_table() for entity getting requests to LittleSis
API, resulting in monkey knowledgeframe of multiple rows. Resorts to entity with the highest number of relationships listed for entries
    that point to multiple entities (like final_item name only entries).
Parameters
----------
    *args: List of names of indivisioniduals or entities for which to include informatingion in the resulting knowledgeframe.
Example
-------
    >>> list_entities('<NAME>', '<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "<NAME>...
1 <NAME> [LeBron James]
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
1 NBA/Los Angeles Lakers—F 1984-12-30 NaN
info types website
0 [Person, Business Person] NaN
1 [Person, Business Person, Media Personality] NaN }
"""
list_of_kfs = []
for name in args:
kf = basic_entity(name)
list_of_kfs.adding(kf)
combined_kf = mk.concating(list_of_kfs, ignore_index=True)
return combined_kf
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def id_to_name(x):
path = 'https://littlesis.org/api/entities/{}'.formating(x)
response = requests.getting(path)
if response.status_code != 200:
raise Exception('API response: {}'.formating(response.status_code))
else:
response = response.json()
name = response['data']['attributes']['name']
return name
def relationships_kf(name):
"""
Creates monkey knowledgeframe with informatingion from relationships getting request to LittleSis
API.
Parameters
----------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the knowledgeframe.
Example
-------
>>> relationships_kf('<NAME>')
primary_entity related_entity amount currency \
0 Children’s Aid Society <NAME> None None
1 <NAME> <NAME> None None
...
category goods filings \
0 None None None
...
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = blurbs.T
blurbs = blurbs[['entity2_id', 'entity1_id', 'amount', 'currency', 'description1', 'goods', 'filings', 'description', 'start_date',
'end_date', 'is_current']]
blurbs['entity1_id'] = blurbs['entity1_id'].employ(id_to_name)
blurbs['entity2_id'] = blurbs['entity2_id'].employ(id_to_name)
blurbs.renagetting_ming(columns = {'entity2_id': 'primary_entity','entity1_id': 'related_entity', 'description1':'category'}, inplace = True)
return blurbs
def timelines(name):
"""
Creates knowledgeframe specifictotal_ally from timeline informatingion of relationships from
relationships getting request on LittleSis API. Resorts to entity with the highest number of relationships listed for entries that
    point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the knowledgeframe.
Example
-------
>>> timelines('<NAME>')
    searched_entity related_entity start_date \
0 Children’s Aid Society <NAME> None
1 <NAME> <NAME> None
...
end_date is_current
0 None None
1 None None
...
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = blurbs.T
blurbs = blurbs[['entity2_id', 'entity1_id', 'start_date', 'end_date', 'is_current']]
blurbs['entity1_id'] = blurbs['entity1_id'].employ(id_to_name)
blurbs['entity2_id'] = blurbs['entity2_id'].employ(id_to_name)
blurbs.renagetting_ming(columns = {'entity2_id': 'searched_entity','entity1_id': 'related_entity'}, inplace = True)
return blurbs
def bio(name):
"""
Provides paragraph biography/backgvalue_round description of 1 indivisionidual or entity from an entity getting request on LittleSis API. Resorts to
    entity with the highest number of relationships listed for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of one indivisionidual or organization for which biographical informatingion is desired.
Example
-------
>>> bio('<NAME>')
'The 44th President of the United States, he was sworn into office on January 20,
2009; born in Honolulu, Hawaii, August 4, 1961; obtained early education in Jakarta,
Indonesia, and Hawaii; continued education at Occidental College, Los Angeles, Calif.;
received a B.A. in 1983 from Columbia University, New York City; worked as a community
organizer in Chicago, Ill.; studied law at Harvard University, where he became the
first African American president of the Harvard Law Review, and received J.D. in 1991;
lecturer on constitutional law, University of Chicago; member, Illinois State senate
1997-2004; elected as a Democrat to the U.S. Senate in 2004 for term beginning January
3, 2005.'
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
response2 = response2['data']['attributes']['total_summary']
return response2
def lists(name):
"""
Provides list of total_all lists that the entity belongs to on the LittleSis website, from a
LittleSis lists getting request. Resorts to entity with the highest number of relationships listed for entries that
    point to multiple entities (like final_item name only entries).
Parameters
---------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the list of list memberships is desired.
Example
-------
>>> lists('<NAME>')
Bloomberg Business Week Most Powerful Athletes (2011)
The World's Highest Paid Celebrities (2017)
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path = 'https://littlesis.org/api/entities/{}/lists'.formating(ID)
response = requests.getting(path)
response = response.json()
data = mk.KnowledgeFrame(response['data'])
data = mk.KnowledgeFrame.convert_dict(data)
names = mk.KnowledgeFrame(data['attributes'])
names = mk.KnowledgeFrame.convert_dict(names)
for key, value in names.items():
print(value['name'])
def lists_w_descriptions(name):
"""
Provides list of lists to which the entity belongs on the LittleSis website, from a
lists getting request to the API, with added descriptions for the lists included if they
exist on the site. Resorts to entity with the highest number of relationships listed for entries that
    point to multiple entities (like final_item name only entries).
Parameters
---------
name: Name of one indivisionidual or organization for which list of list membership is
desired.
Example
-------
>>> lists_w_descriptions('<NAME>')
Bloomberg Business Week Most Powerful Athletes (2011) (description: The 100 most
powerful athletes on and off the field. No coaches, owners, managers, executives or
retired athletes were considered. Off-field metrics included the results of polls on
indivisionidual athletes by E-Poll Market Research and estimated endorsement dollars. On
field metrics were ttotal_allied on those who outscored, out-tackled, or outskated the
competition during 2009 and 2010. Sports were weighted according to their popularity
in the U.S. )
The World's Highest Paid Celebrities (2017) (description: FORBES' annual ranking of
the highest-earning entertainers in the world, published June 12 2017. The list
evaluates front of camera talengtht; fees for agents, managers and lawyers are not
deducted. )
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path = 'https://littlesis.org/api/entities/{}/lists'.formating(ID)
response = requests.getting(path)
response = response.json()
data = mk.KnowledgeFrame(response['data'])
data = mk.KnowledgeFrame.convert_dict(data)
names = mk.KnowledgeFrame(data['attributes'])
names = mk.KnowledgeFrame.convert_dict(names)
for key, value in names.items():
print(value['name'], '(description:', value['description'],')')
def relationship_blurbs(name):
"""
Provides a list of blurbs from the relationship getting request to the LittleSis API,
total_allowing for inspection of total_all relationships for the requested entity. Resorts to entity with the highest number of relationships
    listed for entries that point to multiple entities (like final_item name only entries).
Parameters
---------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the list.
Example
-------
>>> relationship_blurbs('<NAME>')
<NAME> gave money to Children’s Aid Society
<NAME> and <NAME> are/were in a family
<NAME> and <NAME> are/were business partners
<NAME> and <NAME> have/had a professional relationship
<NAME> has a position (Founder ) at James Family Foundation
<NAME> and <NAME> are/were business partners
<NAME> is an owner of Blaze Pizza LLC
<NAME> has a position (Co founder ) at Klutch Sports
<NAME> gave money to Democratic National Committee
<NAME> gave money to Democratic White House Victory Fund
<NAME> and <NAME> have/had a professional relationship
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = | mk.KnowledgeFrame.convert_dict(blurbs) | pandas.DataFrame.to_dict |
import monkey as mk
import networkx as nx
import numpy as np
import os
import random
'''
code main goal: make a graph with labels and make a knowledge-graph to the classes.
~_~_~ Graph ~_~_~
Graph nodes: movies
Graph edges: given 2 movies, an edge is added if a cast member plays in both movies.
Label: the genre of the movie. Each multi-genre combination is treated as its own label; for example,
Drama-Comedy and Action-Comedy are treated as different labels.
~_~_~ Knowledge-Graph ~_~_~
Knowledge-Graph nodes: classes represented by genre types.
Knowledge-Graph edges: the Jaccard similarity (intersection over union) gives the weight of the edge between two classes.
For example: the intersection of Drama-Comedy and Action-Comedy is Comedy (size 1)
and the union is Drama, Action, Comedy (size 3),
so there is an edge with weight 1/3 between those classes.
'''
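# Illustrative helper (added; not used by the class below): the Jaccard similarity
# between two multi-genre labels as described above, e.g. 'Drama-Comedy' vs
# 'Action-Comedy' -> 1/3.
def _example_genre_jaccard(label_a, label_b):
    genres_a, genres_b = set(label_a.split('-')), set(label_b.split('-'))
    union = genres_a | genres_b
    if not union:
        return 0.0
    return length(genres_a & genres_b) / float(length(union))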
class DataCsvToGraph(object):
"""
Class that read and clean the data
For IMDb data set we download 2 csv file
IMDb movies.csv includes 81273 movies with attributes: title, year, genre , etc.
    IMDb title_principals.csv includes 38800 movies and 175715 cast names that play among the movies.
"""
def __init__(self, data_paths):
self.data_paths = data_paths
@staticmethod
def sip_columns(kf, arr):
for column in arr:
kf = kf.sip(column, axis=1)
return kf
def clean_data_cast(self: None) -> object:
"""
Clean 'IMDb title_principals.csv' data.
:return: Data-Frame with cast ('imdb_name_id') and the movies ('imdb_title_id') they play.
"""
if os.path.exists('pkl_e2v/data_cast_movie.pkl'):
data = mk.read_csv(self.data_paths['cast'])
clean_column = ['ordering', 'category', 'job', 'characters']
data = self.sip_columns(data, clean_column)
data = data.sort_the_values('imdb_name_id')
data = mk.KnowledgeFrame.sipna(data)
keys = data
keys = keys.sip('imdb_name_id', axis=1)
data = mk.read_pickle('pkl_e2v/data_cast_movie.pkl')
data['tmp'] = keys['imdb_title_id']
else:
data = mk.read_csv(self.data_paths['cast'])
clean_column = ['ordering', 'category', 'job', 'characters']
data = self.sip_columns(data, clean_column)
data = data.sort_the_values('imdb_name_id')
data = mk.KnowledgeFrame.sipna(data)
keys = data.sip_duplicates('imdb_title_id')
keys = keys.sip('imdb_name_id', axis=1)
keys = keys.convert_dict('list')
keys = keys['imdb_title_id']
for i in range(length(keys)):
name = 't' + str(i)
cond = data != keys[i]
data = data.where(cond, name)
data.to_pickle('pkl_e2v/data_cast_movie.pkl')
data = mk.read_csv(self.data_paths['cast'])
clean_column = ['ordering', 'category', 'job', 'characters']
data = self.sip_columns(data, clean_column)
data = data.sort_the_values('imdb_name_id')
data = | mk.KnowledgeFrame.sipna(data) | pandas.DataFrame.dropna |
"""
Module for employing conditional formatingting to KnowledgeFrames and Collections.
"""
from collections import defaultdict
from contextlib import contextmanager
import clone
from functools import partial
from itertools import product
from typing import (
Any,
Ctotal_allable,
DefaultDict,
Dict,
List,
Optional,
Sequence,
Tuple,
Union,
)
from uuid import uuid1
import numpy as np
from monkey._config import getting_option
from monkey._libs import lib
from monkey._typing import Axis, FrameOrCollections, FrameOrCollectionsUnion, Label
from monkey.compat._optional import import_optional_dependency
from monkey.util._decorators import Appender
from monkey.core.dtypes.common import is_float
import monkey as mk
from monkey.api.types import is_dict_like, is_list_like
import monkey.core.common as com
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import _shared_docs
from monkey.core.indexing import _maybe_numeric_slice, _non_reducing_slice
jinja2 = import_optional_dependency("jinja2", extra="KnowledgeFrame.style requires jinja2.")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func: Ctotal_allable):
if has_mpl:
yield plt, colors
else:
raise ImportError(no_mpl_message.formating(func.__name__))
class Styler:
"""
Helps style a KnowledgeFrame or Collections according to the data with HTML and CSS.
Parameters
----------
data : Collections or KnowledgeFrame
Data to be styled - either a Collections or KnowledgeFrame.
precision : int
Precision to value_round floats to, defaults to mk.options.display.precision.
table_styles : list-like, default None
List of {selector: (attr, value)} dicts; see Notes.
uuid : str, default None
A distinctive identifier to avoid CSS collisions; generated automatictotal_ally.
caption : str, default None
Caption to attach to the table.
table_attributes : str, default None
Items that show up in the opening ``<table>`` tag
in addition to automatic (by default) id.
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the distinctive identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
na_rep : str, optional
Representation for missing values.
If ``na_rep`` is None, no special formatingting is applied
.. versionadded:: 1.0.0
Attributes
----------
env : Jinja2 jinja2.Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
KnowledgeFrame.style : Return a Styler object containing methods for building
a styled HTML representation for the KnowledgeFrame.
Notes
-----
Most styling will be done by passing style functions into
``Styler.employ`` or ``Styler.employmapping``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatictotal_ally render itself. Otherwise ctotal_all Styler.render to getting
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_header_numing``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_header_numing``
* ``col<n>`` where `n` is the numeric position of the column
      * ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
loader = jinja2.PackageLoader("monkey", "io/formatings/templates")
env = jinja2.Environment(loader=loader, trim_blocks=True)
template = env.getting_template("html.tpl")
def __init__(
self,
data: FrameOrCollectionsUnion,
precision: Optional[int] = None,
table_styles: Optional[List[Dict[str, List[Tuple[str, str]]]]] = None,
uuid: Optional[str] = None,
caption: Optional[str] = None,
table_attributes: Optional[str] = None,
cell_ids: bool = True,
na_rep: Optional[str] = None,
):
self.ctx: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list)
self._todo: List[Tuple[Ctotal_allable, Tuple, Dict]] = []
if not incontainstance(data, (mk.Collections, mk.KnowledgeFrame)):
raise TypeError("``data`` must be a Collections or KnowledgeFrame")
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_distinctive or not data.columns.is_distinctive:
raise ValueError("style is not supported for non-distinctive indices.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = getting_option("display.precision")
self.precision = precision
self.table_attributes = table_attributes
self.hidden_index = False
self.hidden_columns: Sequence[int] = []
self.cell_ids = cell_ids
self.na_rep = na_rep
# display_funcs mappings (row, col) -> formatingting function
def default_display_func(x):
if self.na_rep is not None and mk.ifna(x):
return self.na_rep
elif is_float(x):
display_formating = f"{x:.{self.precision}f}"
return display_formating
else:
return x
self._display_funcs: DefaultDict[
Tuple[int, int], Ctotal_allable[[Any], str]
] = defaultdict(lambda: default_display_func)
def _repr_html_(self) -> str:
"""
Hooks into Jupyter notebook rich display system.
"""
return self.render()
@Appender(
_shared_docs["to_excel"]
% dict(
axes="index, columns",
klass="Styler",
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel="\n .. versionadded:: 0.20",
)
)
def to_excel(
self,
excel_writer,
sheet_name: str = "Sheet1",
na_rep: str = "",
float_formating: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header_numer: Union[Sequence[Label], bool] = True,
index: bool = True,
index_label: Optional[Union[Label, Sequence[Label]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
unioner_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
from monkey.io.formatings.excel import ExcelFormatter
formatingter = ExcelFormatter(
self,
na_rep=na_rep,
cols=columns,
header_numer=header_numer,
float_formating=float_formating,
index=index,
index_label=index_label,
unioner_cells=unioner_cells,
inf_rep=inf_rep,
)
formatingter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def _translate(self):
"""
Convert the KnowledgeFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {header_num, body, uuid, cellstyle}.
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = self.uuid or str(uuid1()).replacing("-", "_")
ROW_HEADING_CLASS = "row_header_numing"
COL_HEADING_CLASS = "col_header_numing"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def formating_attr(pair):
return f"{pair['key']}={pair['value']}"
# for sparsifying a MultiIndex
idx_lengthgths = _getting_level_lengthgths(self.index)
col_lengthgths = _getting_level_lengthgths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.convert_list()
clabels = self.data.columns.convert_list()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle_mapping = defaultdict(list)
header_num = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [
{
"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": not hidden_index,
"class": " ".join([BLANK_CLASS]),
}
] * (n_rlvls - 1)
# ... except maybe the final_item for columns.names
name = self.data.columns.names[r]
cs = [
BLANK_CLASS if name is None else INDEX_NAME_CLASS,
f"level{r}",
]
name = BLANK_VALUE if name is None else name
row_es.adding(
{
"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": not hidden_index,
}
)
if clabels:
for c, value in enumerate(clabels[r]):
cs = [
COL_HEADING_CLASS,
f"level{r}",
f"col{c}",
]
cs.extend(
cell_context.getting("col_header_numings", {}).getting(r, {}).getting(c, [])
)
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengthgths),
}
colspan = col_lengthgths.getting((r, c), 0)
if colspan > 1:
es["attributes"] = [
formating_attr({"key": "colspan", "value": colspan})
]
row_es.adding(es)
header_num.adding(row_es)
if (
self.data.index.names
and | com.whatever_not_none(*self.data.index.names) | pandas.core.common.any_not_none |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but it isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result = algos.incontain(s, s[0:2])
expected = np.zeros(length(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Collections(Categorical(1).from_codes(vals, cats))
St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.incontain(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.incontain(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_counts_value_num(self):
np.random.seed(1234)
from monkey.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert incontainstance(factor, n)
result = algos.counts_value_num(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).totype(CDT(ordered=True))
expected = Collections([1, 1, 1, 1], index=index)
tm.assert_collections_equal(result.sorting_index(), expected.sorting_index())
def test_counts_value_num_bins(self):
s = [1, 2, 3, 4]
result = algos.counts_value_num(s, bins=1)
expected = Collections([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_collections_equal(result, expected)
result = algos.counts_value_num(s, bins=2, sort=False)
expected = Collections([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_dtypes(self):
result = algos.counts_value_num([1, 1.])
assert length(result) == 1
result = algos.counts_value_num([1, 1.], bins=1)
assert length(result) == 1
result = algos.counts_value_num(Collections([1, 1., '1'])) # object
assert length(result) == 2
pytest.raises(TypeError, lambda s: algos.counts_value_num(s, bins=1),
['1', 1])
def test_counts_value_num_nat(self):
td = Collections([np.timedelta64(10000), mk.NaT], dtype='timedelta64[ns]')
dt = mk.convert_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.counts_value_num(s)
vc_with_na = algos.counts_value_num(s, sipna=False)
assert length(vc) == 1
assert length(vc_with_na) == 2
exp_dt = Collections({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_collections_equal(algos.counts_value_num(dt), exp_dt)
# TODO same for (timedelta)
def test_counts_value_num_datetime_outofbounds(self):
# GH 13663
s = Collections([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.counts_value_num()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Collections([3, 2, 1], index=exp_index)
tm.assert_collections_equal(res, exp)
# GH 12424
res = mk.convert_datetime(Collections(['2362-01-01', np.nan]),
errors='ignore')
exp = Collections(['2362-01-01', np.nan], dtype=object)
tm.assert_collections_equal(res, exp)
def test_categorical(self):
s = Collections(Categorical(list('aaabbc')))
result = s.counts_value_num()
expected = Collections([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.counts_value_num()
expected.index = expected.index.as_ordered()
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Collections(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_collections_equal(result, expected, check_index_type=True)
# out of order
s = Collections(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Collections(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.counts_value_num()
expected = Collections([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_sipna(self):
# https://github.com/monkey-dev/monkey/issues/9443#issuecomment-73719328
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=False),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=False),
Collections([2, 1, 1], index=[True, False, np.nan]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=False),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5., None]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Collections([10.3, 5., 5., None]).counts_value_num(sipna=False)
expected = Collections([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_collections_equal(result, expected)
def test_counts_value_num_normalized(self):
# GH12558
s = Collections([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.totype(t)
result = s_typed.counts_value_num(normalize=True, sipna=False)
expected = Collections([0.6, 0.2, 0.2],
index=Collections([np.nan, 2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
result = s_typed.counts_value_num(normalize=True, sipna=True)
expected = Collections([0.5, 0.5],
index=Collections([2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Collections([1], index=[2**63])
result = algos.counts_value_num(arr)
tm.assert_collections_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Collections([1, 1], index=[-1, 2**63])
result = algos.counts_value_num(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_collections_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_values_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated_values(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='final_item')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated_values(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='final_item')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_final_item = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_final_item
res_first = algos.duplicated_values(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_final_item = algos.duplicated_values(case, keep='final_item')
tm.assert_numpy_array_equal(res_final_item, exp_final_item)
res_false = algos.duplicated_values(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category')]:
res_first = idx.duplicated_values(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_final_item = idx.duplicated_values(keep='final_item')
tm.assert_numpy_array_equal(res_final_item, exp_final_item)
res_false = idx.duplicated_values(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# collections
for s in [Collections(case), Collections(case, dtype='category')]:
res_first = s.duplicated_values(keep='first')
tm.assert_collections_equal(res_first, Collections(exp_first))
res_final_item = s.duplicated_values(keep='final_item')
tm.assert_collections_equal(res_final_item, Collections(exp_final_item))
res_false = s.duplicated_values(keep=False)
tm.assert_collections_equal(res_false, Collections(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([mk.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([mk.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_final_item = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_final_item
for case in cases:
res_first = algos.duplicated_values(case, keep='first')
"""
This module creates plots for visualizing sensitivity analysis knowledgeframes.
`make_plot()` creates a radial plot of the first and total order indices.
`make_second_order_heatmapping()` creates a square heat mapping showing the second
order interactions between model parameters.
"""
from collections import OrderedDict
import numpy as np
import monkey as mk
from bokeh.plotting import figure, ColumnDataSource
from bokeh.models import HoverTool, VBar
# from bokeh.charts import Bar
def make_plot(knowledgeframe=mk.KnowledgeFrame(), highlight=[],
top=100, getting_minvalues=0.01, stacked=True, lgaxis=True,
errorbar=True, showS1=True, showST=True):
"""
Basic method to plot first and total order sensitivity indices.
This is the method to generate a Bokeh plot similar to the burtin example
template at the Bokeh website. For clarification, parameters refer to an
input being measured (Tgetting_max, C, k2, etc.) and stats refer to the 1st or
total order sensitivity index.
Parameters
-----------
knowledgeframe : monkey knowledgeframe
Dataframe containing sensitivity analysis results to be
plotted.
highlight : lst, optional
List of strings indicating which parameter wedges will be
highlighted.
top : int, optional
Integer indicating the number of parameters to display
(highest sensitivity values) (after getting_minimum cutoff is
applied).
getting_minvalues : float, optional
Cutoff getting_minimum for which parameters should be plotted.
Applies to total order only.
stacked : bool, optional
Boolean indicating in bars should be stacked for each
parameter (True) or unstacked (False).
lgaxis : bool, optional
Boolean indicating if log axis should be used (True) or if a
linear axis should be used (False).
errorbar : bool, optional
Boolean indicating if error bars are shown (True) or are
omitted (False).
showS1 : bool, optional
Boolean indicating whether 1st order sensitivity indices
will be plotted (True) or omitted (False).
showST : bool, optional
Boolean indicating whether total order sensitivity indices
will be plotted (True) or omitted (False).
**Note if showS1 and showST are both false, the plot will
default to showing ST data only instead of a blank plot**
Returns
--------
p : bokeh figure
A Bokeh figure of the data to be plotted
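Examples
--------
A minimal illustrative call; the column values below are invented and
the call itself is skipped since rendering requires a Bokeh session:
>>> kf = mk.KnowledgeFrame({'Parameter': ['Tgetting_max', 'k2'],
... 'S1': [0.40, 0.10], 'S1_conf': [0.02, 0.01],
... 'ST': [0.60, 0.20], 'ST_conf': [0.03, 0.02]})
>>> p = make_plot(kf, highlight=['Tgetting_max'], top=10) # doctest: +SKIP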
"""
kf = knowledgeframe
top = int(top)
# Initialize boolean checks and check knowledgeframe structure
if (('S1' not in kf) or ('ST' not in kf) or ('Parameter' not in kf) or
('ST_conf' not in kf) or ('S1_conf' not in kf)):
raise Exception('Dataframe not formatingted correctly')
# Remove rows which have values less than cutoff values
kf = kf[kf['ST'] > getting_minvalues]
kf = kf.sipna()
# Only keep top values indicated by variable top
kf = kf.sort_the_values('ST', ascending=False)
kf = kf.header_num(top)
kf = kf.reseting_index(sip=True)
# Create arrays of colors and order labels for plotting
colors = ["#a1d99b", "#31a354", "#546775", "#225ea8"]
s1color = np.array(["#31a354"]*kf.S1.size)
sTcolor = np.array(["#a1d99b"]*kf.ST.size)
errs1color = np.array(["#225ea8"]*kf.S1.size)
errsTcolor = np.array(["#546775"]*kf.ST.size)
firstorder = np.array(["1st (S1)"]*kf.S1.size)
totalorder = np.array(["Total (ST)"]*kf.S1.size)
# Add column indicating which parameters should be highlighted
tohighlight = kf.Parameter.incontain(highlight)
kf['highlighted'] = tohighlight
back_color = {
True: "#aeaeb8",
False: "#e6e6e6",
}
# Switch to a bar chart if the knowledgeframe has 5 or fewer parameters
if length(kf) <= 5:
if stacked is False:
data = {
'Sensitivity': mk.Collections.adding(kf.ST, kf.S1),
'Parameter': mk.Collections.adding(kf.Parameter, kf.Parameter),
"""
Base and utility classes for monkey objects.
"""
import textwrap
import warnings
import numpy as np
import monkey._libs.lib as lib
import monkey.compat as compat
from monkey.compat import PYPY, OrderedDict, builtins, mapping, range
from monkey.compat.numpy import function as nv
from monkey.errors import AbstractMethodError
from monkey.util._decorators import Appender, Substitution, cache_readonly
from monkey.util._validators import validate_bool_kwarg
from monkey.core.dtypes.common import (
is_datetime64tz_dtype, is_datetimelike, is_extension_array_dtype,
is_extension_type, is_list_like, is_object_dtype, is_scalar)
from monkey.core.dtypes.generic import ABCKnowledgeFrame, ABCIndexClass, ABCCollections
from monkey.core.dtypes.missing import ifna
from monkey.core import algorithms, common as com
from monkey.core.accessor import DirNamesMixin
import monkey.core.nanops as nanops
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
distinctive='IndexOpsMixin', duplicated_values='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(kf) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from monkey.core.config import getting_option
encoding = getting_option("display.encoding")
return self.__unicode__().encode(encoding, 'replacing')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
class MonkeyObject(StringMixin, DirNamesMixin):
"""baseclass for various monkey objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if gettingattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Collections of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.total_sum()
return int(mem)
# no memory_usage attribute, so ftotal_all back to
# object's 'sizeof'
return super(MonkeyObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
ctotal_all to `self._freeze()`. Mainly used to prevent the user from using
wrong attributes on an accessor (`Collections.cat/.str/.dt`).
If you retotal_ally want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
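A minimal illustrative sketch (the subclass and attribute names are
hypothetical):
>>> class MyAccessor(NoNewAttributesMixin):
... pass
>>> acc = MyAccessor()
>>> acc._freeze()
>>> acc.new_attr = 1 # doctest: +SKIP
AttributeError: You cannot add whatever new attribute 'new_attr'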
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding whatever attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) gettingattr(self, key)
# because
# 1.) gettingattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if (gettingattr(self, "__frozen", False) and not
(key == "_cache" or
key in type(self).__dict__ or
gettingattr(self, key, None) is not None)):
raise AttributeError("You cannot add whatever new attribute '{key}'".
formating(key=key))
object.__setattr__(self, key, value)
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = OrderedDict((
(builtins.total_sum, np.total_sum),
(builtins.getting_max, np.getting_max),
(builtins.getting_min, np.getting_min),
))
_cython_table = OrderedDict((
(builtins.total_sum, 'total_sum'),
(builtins.getting_max, 'getting_max'),
(builtins.getting_min, 'getting_min'),
(np.total_all, 'total_all'),
(np.whatever, 'whatever'),
(np.total_sum, 'total_sum'),
(np.nantotal_sum, 'total_sum'),
(np.average, 'average'),
(np.nanaverage, 'average'),
(np.prod, 'prod'),
(np.nanprod, 'prod'),
(np.standard, 'standard'),
(np.nanstandard, 'standard'),
(np.var, 'var'),
(np.nanvar, 'var'),
(np.median, 'median'),
(np.nanmedian, 'median'),
(np.getting_max, 'getting_max'),
(np.nangetting_max, 'getting_max'),
(np.getting_min, 'getting_min'),
(np.nangetting_min, 'getting_min'),
(np.cumprod, 'cumprod'),
(np.nancumprod, 'cumprod'),
(np.cumtotal_sum, 'cumtotal_sum'),
(np.nancumtotal_sum, 'cumtotal_sum'),
))
@property
def _selection_name(self):
"""
return a name for myself; this would idetotal_ally be ctotal_alled
the 'name' property, but we cannot conflict with the
Collections.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not incontainstance(self._selection, (list, tuple, ABCCollections,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or incontainstance(self.obj, ABCCollections):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and incontainstance(self.obj,
ABCKnowledgeFrame):
return self.obj.reindexing(columns=self._selection_list)
if length(self.exclusions) > 0:
return self.obj.sip(self.exclusions, axis=1)
else:
return self.obj
def __gettingitem__(self, key):
if self._selection is not None:
raise IndexError('Column(s) {selection} already selected'
.formating(selection=self._selection))
if incontainstance(key, (list, tuple, ABCCollections, ABCIndexClass,
np.ndarray)):
if length(self.obj.columns.interst(key)) != length(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: {missing}"
.formating(missing=str(bad_keys)[1:-1]))
return self._gotitem(list(key), ndim=2)
elif not gettingattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".formating(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".formating(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert incontainstance(arg, compat.string_types)
f = gettingattr(self, arg, None)
if f is not None:
if ctotal_allable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-ctotal_allable attribute
# but don't let them think they can pass args to it
assert length(args) == 0
assert length([kwarg for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = gettingattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{arg} is an unknown string function".formating(arg=arg))
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
how can be a string describe the required post-processing, or
None if not required
"""
is_aggregator = lambda x: incontainstance(x, (list, tuple, dict))
is_nested_renagetting_mingr = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = gettingattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
if incontainstance(arg, compat.string_types):
return self._try_aggregate_string_function(arg, *args,
**kwargs), None
if incontainstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
def nested_renagetting_ming_depr(level=4):
# deprecation of nested renagetting_ming
# GH 15931
warnings.warn(
("using a dict with renagetting_ming "
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=level)
# if we have a dict of whatever non-scalars
# eg. {'A' : ['average']}, normalize total_all to
# be list-likes
if whatever(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not incontainstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renagetting_mingrs for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'average' }}
# {'A': { 'ra': ['average'] }}
# {'ra': ['average']}
# not ok
# {'ra' : { 'A' : 'average' }}
if incontainstance(v, dict):
is_nested_renagetting_mingr = True
if k not in obj.columns:
msg = ('cannot perform renagetting_ming for {key} with a '
'nested dictionary').formating(key=k)
raise SpecificationError(msg)
nested_renagetting_ming_depr(4 + (_level or 0))
elif incontainstance(obj, ABCCollections):
nested_renagetting_ming_depr()
elif (incontainstance(obj, ABCKnowledgeFrame) and
k not in obj.columns):
raise KeyError(
"Column '{col}' does not exist!".formating(col=k))
arg = new_arg
else:
# deprecation of renagetting_ming keys
# GH 15931
keys = list(compat.iterkeys(arg))
if (incontainstance(obj, ABCKnowledgeFrame) and
length(obj.columns.interst(keys)) != length(keys)):
nested_renagetting_ming_depr()
from monkey.core.reshape.concating import concating
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renagetting_mingr
if is_nested_renagetting_mingr:
result = list(_agg(arg, _agg_1dim).values())
if total_all(incontainstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.umkate(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Collections like object,
# but may have multiple aggregations
if length(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not length(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a KnowledgeFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting total_all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_whatever_collections():
# return a boolean if we have *whatever* nested collections
return whatever(incontainstance(r, ABCCollections)
for r in compat.itervalues(result))
def is_whatever_frame():
# return a boolean if we have *whatever* nested knowledgeframes
return whatever(incontainstance(r, ABCKnowledgeFrame)
for r in compat.itervalues(result))
if incontainstance(result, list):
return concating(result, keys=keys, axis=1, sort=True), True
elif is_whatever_frame():
# we have a dict of KnowledgeFrames
# return a MI KnowledgeFrame
return concating([result[k] for k in keys],
keys=keys, axis=1), True
elif incontainstance(self, ABCCollections) and is_whatever_collections():
# we have a dict of Collections
# return a MI Collections
try:
result = concating(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatictotal_ally broadcast
raise ValueError("cannot perform both aggregation "
"and transformatingion operations "
"simultaneously")
return result, True
# ftotal_all thru
from monkey import KnowledgeFrame, Collections
try:
result = KnowledgeFrame(result)
except ValueError:
# we have a dict of scalars
result = Collections(result,
name=gettingattr(self, 'name', None))
return result, True
elif is_list_like(arg) and arg not in compat.string_types:
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg,
_level=_level,
_axis=_axis), None
else:
result = None
f = self._is_cython_func(arg)
if f and not args and not kwargs:
return gettingattr(self, f)(), None
# ctotal_aller can react
return result, True
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from monkey.core.reshape.concating import concating
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.adding(colg.aggregate(a))
# make sure we find a good name
name = com.getting_ctotal_allable_name(a) or a
keys.adding(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for index, col in enumerate(obj):
try:
colg = self._gotitem(col, ndim=1,
subset=obj.iloc[:, index])
results.adding(colg.aggregate(arg))
keys.adding(col)
except (TypeError, DataError):
pass
except ValueError:
# cannot aggregate
continue
except SpecificationError:
raise
# if we are empty
if not length(results):
raise ValueError("no results")
try:
return concating(results, keys=keys, axis=1, sort=False)
except TypeError:
# we are concatingting non-NDFrame objects,
# e.g. a list of scalars
from monkey.core.dtypes.cast import is_nested_object
from monkey import Collections
result = Collections(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
"aggregation operations")
return result
def _shtotal_allow_clone(self, obj=None, obj_type=None, **kwargs):
"""
return a new object with the replacingment attributes
"""
if obj is None:
obj = self._selected_obj.clone()
if obj_type is None:
obj_type = self._constructor
if incontainstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = gettingattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
"""
if we define an internal function for this argument, return it
"""
return self._cython_table.getting(arg)
def _is_builtin_func(self, arg):
"""
if we define an builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.getting(arg, arg)
class IndexOpsMixin(object):
""" common ops mixin to support a unified interface / docs for Collections /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
"""
Return the transpose, which is by definition self.
"""
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="Return the transpose, which is by "
"definition self.")
@property
def _is_homogeneous_type(self):
"""
Whether the object has a single dtype.
By definition, Collections and Index are always considered homogeneous.
A MultiIndex may or may not be homogeneous, depending on the
dtypes of the levels.
See Also
--------
KnowledgeFrame._is_homogeneous_type
MultiIndex._is_homogeneous_type
"""
return True
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
return self._values.shape
@property
def ndim(self):
"""
Number of dimensions of the underlying data, by definition 1.
"""
return 1
def item(self):
"""
Return the first element of the underlying data as a python scalar.
"""
try:
return self.values.item()
except IndexError:
# clone numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
"""
Return the data pointer of the underlying data.
"""
warnings.warn("{obj}.data is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.data
@property
def itemsize(self):
"""
Return the size of the dtype of the item of the underlying data.
"""
warnings.warn("{obj}.itemsize is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.itemsize
@property
def nbytes(self):
"""
Return the number of bytes in the underlying data.
"""
return self._values.nbytes
@property
def strides(self):
"""
Return the strides of the underlying data.
"""
warnings.warn("{obj}.strides is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.strides
@property
def size(self):
"""
Return the number of elements in the underlying data.
"""
return self._values.size
@property
def flags(self):
"""
Return the ndarray.flags for the underlying data.
"""
warnings.warn("{obj}.flags is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.flags
@property
def base(self):
"""
Return the base object if the memory of the underlying data is shared.
"""
warnings.warn("{obj}.base is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.base
@property
def array(self):
# type: () -> Union[np.ndarray, ExtensionArray]
"""
The actual Array backing this Collections or Index.
.. versionadded:: 0.24.0
Returns
-------
array : numpy.ndarray or ExtensionArray
This is the actual array stored within this object. This differs
from ``.values`` which may require converting the data
to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Collections.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within monkey.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
datetime64[ns, tz] DatetimeArray
================== =============================
For whatever 3rd-party extension types, the array type will be an
ExtensionArray.
For total_all remaining dtypes ``.array`` will be the :class:`numpy.ndarray`
stored within. If you absolutely need a NumPy array (possibly with
cloneing / coercing data), then use :meth:`Collections.to_numpy` instead.
.. note::
``.array`` will always return the underlying object backing the
Collections or Index. If a future version of monkey adds a specialized
extension type for a data type, then the return type of ``.array``
for that data type will change from an object-dtype ndarray to the
new ExtensionArray.
Examples
--------
>>> ser = mk.Collections(mk.Categorical(['a', 'b', 'a']))
>>> ser.array
[a, b, a]
Categories (2, object): [a, b]
"""
return self._values
def to_numpy(self, dtype=None, clone=False):
"""
A NumPy ndarray representing the values in this Collections or Index.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
clone : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``clone=False`` does not *ensure* that
``to_numpy()`` is no-clone. Rather, ``clone=True`` ensures that
a clone is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Collections.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
KnowledgeFrame.to_numpy : Similar method for KnowledgeFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Collections,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Collections or Index (astotal_sugetting_ming ``clone=False``). Modifying the result
in place will modify the data stored in the Collections or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require cloneing data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-clone reference to the underlying data,
:attr:`Collections.array` should be used instead.
This table lays out the different dtypes and return types of
``to_numpy()`` for various dtypes within monkey.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = mk.Collections(mk.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of monkey :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = mk.Collections(mk.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is sipped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
"""
if (is_extension_array_dtype(self.dtype) or
is_datetime64tz_dtype(self.dtype)):
# TODO(DatetimeArray): remove the second clause.
# TODO(GH-24345): Avoid potential double clone
result = np.asarray(self._values, dtype=dtype)
else:
result = self._values
if clone:
result = result.clone()
return result
@property
def _ndarray_values(self):
# type: () -> np.ndarray
"""
The data as an ndarray, possibly losing informatingion.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
- categorical -> codes
"""
if is_extension_array_dtype(self):
return self.array._ndarray_values
return self.values
@property
def empty(self):
return not self.size
def getting_max(self):
"""
Return the getting_maximum value of the Index.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.getting_min : Return the getting_minimum value in an Index.
Collections.getting_max : Return the getting_maximum value in a Collections.
KnowledgeFrame.getting_max : Return the getting_maximum values in a KnowledgeFrame.
Examples
--------
>>> idx = mk.Index([3, 2, 1])
>>> idx.getting_max()
3
>>> idx = mk.Index(['c', 'b', 'a'])
>>> idx.getting_max()
'c'
For a MultiIndex, the getting_maximum is detergetting_mined lexicographictotal_ally.
>>> idx = mk.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.getting_max()
('b', 2)
"""
return nanops.nangetting_max(self.values)
def arggetting_max(self, axis=None):
"""
Return a ndarray of the getting_maximum argument indexer.
See Also
--------
numpy.ndarray.arggetting_max
"""
return nanops.nanarggetting_max(self.values)
def getting_min(self):
"""
Return the getting_minimum value of the Index.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.getting_max : Return the getting_maximum value of the object.
Collections.getting_min : Return the getting_minimum value in a Collections.
KnowledgeFrame.getting_min : Return the getting_minimum values in a KnowledgeFrame.
Examples
--------
>>> idx = mk.Index([3, 2, 1])
>>> idx.getting_min()
1
>>> idx = mk.Index(['c', 'b', 'a'])
>>> idx.getting_min()
'a'
For a MultiIndex, the getting_minimum is detergetting_mined lexicographictotal_ally.
>>> idx = mk.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.getting_min()
('a', 1)
"""
return nanops.nangetting_min(self.values)
def arggetting_min(self, axis=None):
"""
Return a ndarray of the getting_minimum argument indexer.
See Also
--------
numpy.ndarray.arggetting_min
"""
return nanops.nanarggetting_min(self.values)
def convert_list(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a monkey scalar
(for Timestamp/Timedelta/Interval/Period)
See Also
--------
numpy.ndarray.convert_list
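Examples
--------
>>> s = mk.Collections([1, 2, 3])
>>> s.convert_list()
[1, 2, 3]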
"""
if is_datetimelike(self._values):
return [com.maybe_box_datetimelike(x) for x in self._values]
elif is_extension_array_dtype(self._values):
return list(self._values)
else:
return self._values.convert_list()
to_list = convert_list
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a monkey scalar
(for Timestamp/Timedelta/Interval/Period)
"""
# We are explicitly making element iterators.
if is_datetimelike(self._values):
return mapping(com.maybe_box_datetimelike, self._values)
elif is_extension_array_dtype(self._values):
return iter(self._values)
else:
return mapping(self._values.item, range(self._values.size))
@cache_readonly
def hasnans(self):
"""
Return if I have whatever nans; enables various perf speedups.
"""
return bool(ifna(self).whatever())
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
func = gettingattr(self, name, None)
if func is None:
raise TypeError("{klass} cannot perform the operation {op}".formating(
klass=self.__class__.__name__, op=name))
return func(**kwds)
def _mapping_values(self, mappingper, na_action=None):
"""
An internal function that mappings values using the input
correspondence (which can be a dict, Collections, or function).
Parameters
----------
mappingper : function, dict, or Collections
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mappingping function
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mappingping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
# we can fastpath dict/Collections to an efficient mapping
# as we know that we are not going to have to yield
# python types
if incontainstance(mappingper, dict):
if hasattr(mappingper, '__missing__'):
# If a dictionary subclass defines a default value method,
# convert mappingper to a lookup function (GH #15999).
dict_with_default = mappingper
mappingper = lambda x: dict_with_default[x]
else:
# Dictionary does not have a default. Thus it's safe to
# convert to an Collections for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
from monkey import Collections
mappingper = Collections(mappingper)
if incontainstance(mappingper, ABCCollections):
# Since values were input this averages we came from either
# a dict or a collections and mappingper should be an index
if is_extension_type(self.dtype):
values = self._values
else:
values = self.values
indexer = mappingper.index.getting_indexer(values)
new_values = algorithms.take_1d(mappingper._values, indexer)
return new_values
# we must convert to python types
if is_extension_type(self.dtype):
values = self._values
if na_action is not None:
raise NotImplementedError
mapping_f = lambda values, f: values.mapping(f)
else:
values = self.totype(object)
values = gettingattr(values, 'values', values)
if na_action == 'ignore':
def mapping_f(values, f):
return lib.mapping_infer_mask(values, f,
ifna(values).view(np.uint8))
else:
mapping_f = lib.mapping_infer
# mappingper is a function
new_values = mapping_f(values, mappingper)
return new_values
def counts_value_num(self, normalize=False, sort=True, ascending=False,
bins=None, sipna=True):
"""
Return a Collections containing counts of distinctive values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the distinctive values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for ``mk.cut``, only works with numeric data.
sipna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Collections
See Also
--------
Collections.count: Number of non-NA elements in a Collections.
KnowledgeFrame.count: Number of non-NA elements in a KnowledgeFrame.
Examples
--------
>>> index = mk.Index([3, 1, 2, 3, 4, np.nan])
>>> index.counts_value_num()
3.0 2
4.0 1
2.0 1
1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
divisioniding total_all values by the total_sum of values.
>>> s = mk.Collections([3, 1, 2, 3, 4, np.nan])
>>> s.counts_value_num(normalize=True)
3.0 0.4
4.0 0.2
2.0 0.2
1.0 0.2
dtype: float64
**bins**
Bins can be useful for going from a continuous variable to a
categorical variable; instead of counting distinctive
apparitions of values, divisionide the index in the specified
number of half-open bins.
>>> s.counts_value_num(bins=3)
(2.0, 3.0] 2
(0.996, 2.0] 2
(3.0, 4.0] 1
dtype: int64
**sipna**
With `sipna` set to `False` we can also see NaN index values.
>>> s.counts_value_num(sipna=False)
3.0 2
NaN 1
4.0 1
2.0 1
1.0 1
dtype: int64
"""
from monkey.core.algorithms import counts_value_num
result = counts_value_num(self, sort=sort, ascending=ascending,
normalize=normalize, bins=bins, sipna=sipna)
return result
def distinctive(self):
values = self._values
if hasattr(values, 'distinctive'):
result = values.distinctive()
else:
from monkey.core.algorithms import distinctive1d
result = distinctive1d(values)
return result
def ndistinctive(self, sipna=True):
"""
Return number of distinctive elements in the object.
Excludes NA values by default.
Parameters
----------
sipna : boolean, default True
Don't include NaN in the count.
Returns
-------
ndistinctive : int
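Examples
--------
>>> s = mk.Collections([1, 3, 5, 7, 7])
>>> s.ndistinctive()
4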
"""
uniqs = self.distinctive()
n = length(uniqs)
if sipna and ifna(uniqs).whatever():
n -= 1
return n
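# Minimal usage sketch (hypothetical data) of the distinct-count behaviour documented
# above; NaN is skipped unless sipna=False:
# >>> s = mk.Collections([1, 2, 2, np.nan])
# >>> s.ndistinctive()
# 2
# >>> s.ndistinctive(sipna=False)
# 3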
@property
def is_distinctive(self):
"""
Return boolean if values in the object are distinctive.
Returns
-------
is_distinctive : boolean
"""
return self.ndistinctive() == length(self)
@property
def is_monotonic(self):
"""
Return boolean if values in the object are
monotonic_increasing.
.. versionadded:: 0.19.0
Returns
-------
is_monotonic : boolean
"""
from monkey import Index
return Index(self).is_monotonic
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
"""
Return boolean if values in the object are
monotonic_decreasing.
.. versionadded:: 0.19.0
Returns
-------
is_monotonic_decreasing : boolean
"""
from monkey import Index
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
"""
Memory usage of the values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
See Also
--------
numpy.ndarray.nbytes
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
"""
if hasattr(self.array, 'memory_usage'):
return self.array.memory_usage(deep=deep)
v = self.array.nbytes
if deep and is_object_dtype(self) and not PYPY:
v += lib.memory_usage_of_objects(self.array)
return v
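# Illustrative sketch of the deep=True distinction implemented above (exact byte
# counts are platform-dependent, so none are asserted here):
# >>> s = mk.Collections(['a', 'bb', 'ccc'])
# >>> s.memory_usage()           # shallow: pointer-level nbytes only
# >>> s.memory_usage(deep=True)  # also interrogates each Python string object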
@Substitution(
values='', order='', size_hint='',
sort=textwrap.dedent("""\
sort : boolean, default False
Sort `distinctives` and shuffle `labels` to maintain the
relationship.
"""))
@Appender(algorithms._shared_docs['factorize'])
def factorize(self, sort=False, na_sentinel=-1):
return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
_shared_docs['searchsorted'] = (
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted %(klass)s `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the final_item such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
int or array of int
A scalar or array of insertion points with the
same shape as `value`.
.. versionchanged :: 0.24.0
If `value` is a scalar, an int is now always returned.
Previously, scalar inputs returned an 1-item array for
:class:`Collections` and :class:`Categorical`.
See Also
--------
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = mk.Collections([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
3
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
>>> x = mk.Categorical(['apple', 'bread', 'bread',
'cheese', 'milk'], ordered=True)
[apple, bread, bread, cheese, milk]
Categories (4, object): [apple < bread < cheese < milk]
>>> x.searchsorted('bread')
1
>>> x.searchsorted(['bread'], side='right')
array([3])
""")
@Substitution(klass='IndexOpsMixin')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
# needs coercion on the key (DatetimeIndex does already)
return self.values.searchsorted(value, side=side, sorter=sorter)
def sip_duplicates(self, keep='first', inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if incontainstance(self, ABCIndexClass):
if self.is_distinctive:
return self._shtotal_allow_clone()
duplicated_values = self.duplicated_values(keep=keep)
result = self[np.logical_not(duplicated_values)]
if inplace:
return self._umkate_inplace(result)
else:
return result
def duplicated_values(self, keep='first'):
from monkey.core.algorithms import duplicated_values
if incontainstance(self, ABCIndexClass):
if self.is_distinctive:
return np.zeros(length(self), dtype=np.bool)
return | duplicated_values(self, keep=keep) | pandas.core.algorithms.duplicated |
import os
import re
import monkey as mk
import networkx as nx
def xor(a, b):
return (a and not b) or (not a and b)
class Rule(object):
name = "NA"
def is_leq(self, x, z):
"""
The implementation should return -, <=, >=, <, >, or =
"""
return "-"
@staticmethod
def same_r(y):
if Rule.is_empty(y):
return False
y_0 = y[0]
is_total_all_same_r = length(y) == y.count(y_0)
return is_total_all_same_r
@staticmethod
def has_x(y):
return y.count("x") > 0
@staticmethod
def has_r(y,r):
return y.count(r) > 0
@staticmethod
def is_empty(y):
return length(y) == 0
@staticmethod
def is_same(y, z):
return y == z
@staticmethod
def eqaul_length(y, z):
return length(y) == length(z)
@staticmethod
def end_with_x(y):
if Rule.is_empty(y):
return False
return y[-1]=="x"
@staticmethod
def is_repeated_x(y):
if not Rule.has_x(y):
return False
x_n = y.count("x")
for i in range(0,x_n):
if (y[y.find("x")+i]!="x"):
return False
return True
@staticmethod
def is_repeated_r(y,r):
if not Rule.has_r(y,r):
return False
r_n = y.count(r)
for i in range(0, r_n):
if (y[y.find(r) + i] != r):
return False
return True
@staticmethod
def is_divisionerse(y):
if Rule.is_empty(y):
return False
no_x = y.replacing("x","")
if length(no_x) <=1:
return False;
return no_x.count(no_x[0])!=length(no_x)
@staticmethod
def same_aspects(y,z):
y_a = set(y)
z_a = set(z)
return length(y_a) == length(z_a) and length(y_a.interst(z_a)) == length(y_a)
@staticmethod
def aspects(y):
y_a = set(y)
return list(y_a)
@staticmethod
def same_start(y,z):
s = "".join(Rule.LCP(y, z))  # LCP is a generator, so materialise the common prefix first
return length(s) > 0 and y.startswith(s) and z.startswith(s)
@staticmethod
def LCS(X, Y):
m = length(X)
n = length(Y)
# An (m+1) times (n+1) matrix
C = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if X[i - 1] == Y[j - 1]:
C[i][j] = C[i - 1][j - 1] + 1
else:
C[i][j] = getting_max(C[i][j - 1], C[i - 1][j])
return C
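# Worked sketch: the DP table's final cell holds the LCS length, e.g. for
# X="rxr", Y="rr" the longest common subsequence is "rr":
# >>> Rule.LCS("rxr", "rr")[-1][-1]
# 2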
@staticmethod
def LCP(y, z):
for j, k in zip(y, z):
if j != k:
break
yield j
@staticmethod
def divisionersity_index(y):
for i in range(1,length(y)):
if y[i] !=y[i-1]:
return i;
class NA(Rule):
name = "NA"
"""
Represents a relation that is not modeled by the current rules.
"""
def is_leq(self, y, z):
return "-"
class Path(Rule):
"""
A induction rule that uses a graph to check violations of metrics
"""
name = "Induction"
def __init__(self,G):
self.G = G
def match(self,y, z):
y_z_path =nx.has_path(self.G,y,z)
z_y_path = nx.has_path(self.G,z,y)
return y_z_path or z_y_path
def is_leq(self, y, z):
is_match = self.match(y,z)
has_path = nx.has_path(self.G,y,z)
return is_match and has_path
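# Minimal wiring sketch for the induction rule (graph and labels are illustrative):
# >>> G = nx.DiGraph()
# >>> G.add_edge("r", "rx")        # encode that "r" is known to be <= "rx"
# >>> Path(G).is_leq("r", "rx")    # True: a directed path y -> z exists
# True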
class _R(Rule):
name = "_R"
@staticmethod
def match(y, z):
is_empty = xor(length(y) == 0, length(z) == 0)
y_x_count = str(y).count("x")
z_x_count = str(z).count("x")
is_total_all_R = y_x_count == 0 and z_x_count == 0
return is_empty and is_total_all_R
def is_leq(self, y, z):
is_match = _R.match(y,z)
if length(y) == 0 and length(z)==0:
return False
return is_match and length(y) ==0
class _RX(Rule):
name = "<Rx"
@staticmethod
def match(y, z):
is_empty = xor(length(y) == 0, length(z) == 0)
first_is_r = (length(y) != 0 and y[0]!="x") or (length(z) != 0 and z[0]!="x")
end_x = xor(Rule.end_with_x(y), Rule.end_with_x(z))
return is_empty and first_is_r and end_x
def is_leq(self, y, z):
is_match = _RX.match(y,z)
return is_match and Rule.is_empty(y)
class SX_S(Rule):
name = "Sx<S"
@staticmethod
def match(y, z):
equal = length(y) == length(z)
if equal:
return False
# exactly one extra
y_n = length(y)
z_n = length(z)
d_n = y_n - z_n
if abs(d_n) != 1:
return False
s_x, s = y, z
if length(z) > length(y):
s_x, s = z, y
one_x_only = length(s_x) == length(s) +1
s_d = s_x.replacing(s, "")
# n = length(s_d)
# x_n = re.compile("[x]{{{0},{0}}}".formating(1))
# if (not equal) and s_x.startswith(s) and x_n.match(s_d):
# return True
if (not equal) and one_x_only and s_x.startswith(s) and s_x[-1] == "x":
return True
return False
def is_leq(self, sx, s):
is_match = SX_S.match(sx, s)
return is_match and length(sx)>length(s)
class Nx_Mx(Rule):
name = "Nx_Mx"
@staticmethod
def match(y, z):
same = Rule.is_same(y,z)
same_size = Rule.eqaul_length(y,z)
end_x = Rule.end_with_x(y) and Rule.end_with_x(z)
return (not same) and same_size and end_x
def is_leq(self, y, z):
is_match = Nx_Mx.match(y, z)
less = is_less(y[0:length(y)-1],z[0:length(z)-1])
return is_match and less
class XS_S(Rule):
name = "xS<S"
@staticmethod
def match(y, z):
equal = length(y) == length(z)
if equal:
return False
# exactly one extra
y_n = length(y)
z_n = length(z)
d_n = y_n - z_n
if abs(d_n) != 1:
return False
x_s, s = y, z
if length(z) > length(y):
x_s, s = z, y  # keep the longer (x-prefixed) string in x_s
one_x_only = length(x_s) == length(s) + 1
if (not equal) and one_x_only and x_s.endswith(s) and x_s[0] == "x":
return True
return False
def is_leq(self, xs, s):
is_match = XS_S.match(xs, s)
return is_match and length(xs) > length(s)
class RX_XR(Rule):
name = "rrx<rxr"
@staticmethod
def getting_displace_letters(y,z):
y_r = re.sub("[^x]", "r",y)
z_r = re.sub("[^x]", "r",z)
displaced_letters = []
for i in range(length(y)):
if y_r[i] != z_r[i]:
displaced_letters.adding(i)
return displaced_letters
@staticmethod
def match(y, z):
equal = length(y) == length(z)
if not equal or y ==z:
return False
displaced_letters = RX_XR.getting_displace_letters(y,z)
if length(displaced_letters) != 2 or ('x' not in [y[displaced_letters[0]],y[displaced_letters[1]]]) :
return False
return True
def is_leq(self, y, z):
matched = self.match(y,z)
pair = RX_XR.getting_displace_letters(y,z)
return matched and y[pair [0]] == 'x'
class RX_RR(Rule):
name = "rx<rr"
@staticmethod
def getting_displace_letters(y,z):
y_r = re.sub("[^x]", "r",y)
z_r = re.sub("[^x]", "r",z)
displaced_letters = []
for i in range(length(y)):
if y_r[i] != z_r[i]:
displaced_letters.adding(i)
return displaced_letters
@staticmethod
def match(y, z):
equal = length(y) == length(z)
if not equal or y ==z:
return False
displaced_letters = RX_RR.getting_displace_letters(y,z)
if length(displaced_letters) != 1 or ('x' not in [y[displaced_letters[0]],z[displaced_letters[0]]]) :
return False
return True
def is_leq(self, y, z):
equal = length(y) == length(z)
if not equal or y == z:
return False
matched = RX_RR.match(y,z)
pair = RX_RR.getting_displace_letters(y,z)
return matched and y[pair [0]] == 'x'
class S_S(Rule):
name = "S=S"
@staticmethod
def match(y, z):
equal = y == z
return equal
def is_leq(self, y, z):
is_match = S_S.match(y, z)
#S is equal or less to itself.
return is_match and False
class S_SR(Rule):
name = "S<Sr"
@staticmethod
def match(y, z):
s, s_r = y, z
if length(y) > length(z):
s, s_r = z, y
one_r_only = length(s) == length(s_r)-1
return one_r_only and s_r.startswith(s) and s_r[-1] !="x"
def is_leq(self, s, sr):
is_match = S_SR.match(s, sr)
return is_match and length(s) < length(sr)
class P_NN(Rule):
name = "Sn=Sn"
@staticmethod
def match(y, z):
equal = length(y) == length(z)
if not equal or length(y)==0 or length(z) ==0:
return False
n = length(y)
same_parent = y[0:n - 1] == z[0:n - 1]
if not same_parent:
return False
p = y[0:n - 1]
r1 = y[n - 1]
r2 = z[n - 1]
is_new_new = p.find(r1) == -1 and p.find(r2) == -1 # if the parent does not have both of them.
is_not_x = not (r1 =="x" or r2 =="x")
return equal and same_parent and is_new_new and is_not_x
def is_leq(self, pr, pn):
is_match = P_NN.match(pr, pn)
n = length(pr)
p = pr[0:n - 1]
r1 = pr[n - 1]
r2 = pn[n - 1]
# if both contain novel data, then they are equal
verdict = False
return is_match and verdict
class P_RN(Rule):
name = "Sr<Sn"
@staticmethod
def match(y, z):
equal = length(y) == length(z)
if not equal or length(y) ==0 or length(z) ==0:
return False
n = length(y)
same_parent = y[0:n - 1] == z[0:n - 1]
if not same_parent:
return False
p = y[0:n - 1]
r1 = y[n - 1]
r2 = z[n - 1]
is_new = xor(p.find(r1) == -1, p.find(r2) == -1) # if the parent does not have one of them.
is_not_x = not (r1 == "x" or r2 == "x")
return equal and same_parent and is_new and is_not_x
def is_leq(self, pr, pn):
is_match = P_RN.match(pr, pn)
n = length(pr)
p = pr[0:n - 1]
r1 = pr[n - 1]
r2 = pn[n - 1]
# if pn is more diverse as it contains the new bit, then pr is definitely worse
verdict = p.find(r2) == -1
return is_match and verdict
class RRR_RNM(Rule):
name = "RRR_RNM"
@staticmethod
def match(y, z):
same = Rule.is_same(y, z)
same_size = Rule.eqaul_length(y, z)
whatever_zero = Rule.is_empty(y) or Rule.is_empty(z)
same_r = Rule.same_r(y) or Rule.same_r(z)
has_x = Rule.has_x(y) or Rule.has_x(z)
is_divisionerse = xor(Rule.is_divisionerse(y), Rule.is_divisionerse(z))
if (not same_size) or whatever_zero or (not same_r) or same or has_x:
return False
return True and is_divisionerse
def is_leq(self, y, z):
is_match = RRR_RNM.match(y, z)
rrr, rnm = y, z
if (Rule.same_r(z)):
rrr, rnm = z, y
return is_match and rrr == y
class RM_MR(Rule):
name = "rrnn< rnnr"
@staticmethod
def match(y, z):
equal_lengthgth = Rule.eqaul_length(y,z)
has_x = Rule.has_x(y) or Rule.has_x(z)
if not equal_lengthgth or y == z or has_x:
return False
l = length(y)
aspects = Rule.aspects(y)
same_aspets = Rule.same_aspects(y,z)
only_2_aspects = length(aspects) == 2
if not (only_2_aspects and equal_lengthgth and same_aspets):
return False
r, x = aspects[0], aspects[1]
x_n = y.count(x)
r_n = y.count(r)
if x_n ==r_n and Rule.is_repeated_r(y,x) and Rule.is_repeated_r(y,r) and Rule.is_repeated_r(z,x) and Rule.is_repeated_r(z,r):
return False
if not (Rule.is_repeated_r(y,x) and Rule.is_repeated_r(z,x)):
r_n, x_n = x_n, r_n
r, x = x, r
if (x_n == 0 or x_n != z.count(x) or l == x_n):
return
r_n = l - x_n
y_r = y.replacing(x, "")
z_r = z.replacing(x, "")
same_r = y.count(y_r[0]) == r_n and z.count(y_r[0]) == r_n
repeated_r = Rule.is_repeated_r(y,x) and Rule.is_repeated_r(z,x)
return same_r and repeated_r
def is_leq(self, y, z):
is_match = RM_MR.match(y, z)
if (not is_match):
return False
aspects = list(Rule.aspects(y))
r, x = aspects[0], aspects[1]
x_n = y.count(x)
r_n = y.count(r)
if not (Rule.is_repeated_r(y, x) and Rule.is_repeated_r(z, x)):
r_n, x_n = x_n, r_n
r, x = x, r
p_y, p_z = y.find(x), z.find(x)
verdict = (p_y > p_z)
while p_y ==p_z:
y = str(y).replacing(x, "", 1)
z = str(z).replacing(x, "", 1)
p_y = y.find(x)
p_z = z.find(x)
verdict = (p_y > p_z)
if (p_y == -1 or p_z > -1):
break
verdict = Rule.divisionersity_index(y) > Rule.divisionersity_index(z)
return is_match and verdict
class RRN_RNR(Rule):
name = "rrnn<rnrn"
@staticmethod
def match(y, z):
same = Rule.is_same(y,z)
equal_lengthgth = Rule.eqaul_length(y,z)
same_aspects = Rule.same_aspects(y,z)
same_size = Rule.eqaul_length(y,z)
whatever_zero = Rule.is_empty(y) or Rule.is_empty(z)
has_no_x = not (Rule.has_x(y) or Rule.has_x(z))
is_divisionerse = Rule.is_divisionerse(y) and Rule.is_divisionerse(z)
aspects = Rule.aspects(y)
only_2_aspects = length(aspects) ==2
eqaul_aspects_lengthght = True
for a in aspects:
if y.count(a) != z.count(a):
eqaul_aspects_lengthght = False
break;
if not (only_2_aspects and same_aspects and eqaul_aspects_lengthght and same_size and equal_lengthgth and (not same) and (not whatever_zero) and has_no_x and is_divisionerse):
return False
a, b = aspects[0], aspects[1]
same_start = y.startswith(a) and z.startswith(a)
if not same_start:
return False
a_n,b_m = y.count(a), y.count(b)
rrnn_reg = re.compile("{}{{{},{}}}{}{{{},{}}}".formating(a,a_n,a_n,b,b_m,b_m))
if (rrnn_reg.match(y) ==None and rrnn_reg.match(z)==None):
return False
else:
return True
rnrr_reg = re.compile("({}){{{},{}}}({})+".formating(a, 1, a_n - 1, b))
rrnn, rnrr = y, z
return
def is_leq(self, y, z):
is_match = RRN_RNR.match(y, z)
aspects = Rule.aspects(y)
a, b = aspects[0],aspects[1]
a_n, b_m = y.count(a), y.count(b)
rrnn_reg = re.compile("({}){{{},{}}}({}){{{},{}}}".formating(a, a_n, a_n, b, b_m, b_m))
verdict = rrnn_reg.match(y) != None
return is_match and verdict
class RuleValidator(object):
def __init__(self,rules):
self.rules=rules
self.graph = None
def getting_matching_rules(self, y, z):
""" Get which rules that match the two string"""
matched_rules = []
for rule in self.rules:
if rule.match(y, z):
matched_rules.adding(rule)
return matched_rules
def validate_metrics(self,kf, A=[],metrics=[], runs=[], m=3,print_violation=False, violation_mode = True):
"""
Evaluate the metrics using the Rule validations
"""
avg_kf = kf[kf["topic"] == "total_all"]
metrics_rules_violation = {}
path_rule = Path(self.graph)
if length(metrics) == 0:
values = kf.columns.values
for v in values:
metrics.adding(v)
metrics.remove("topic")
metrics.remove("iteration")
metrics.remove("run")
metrics_rules_viloations = {}
metrics_rules_matches = {}
rules_names = [r.name for r in self.rules]+[Path.name,NA.name]
for metric in metrics:
metrics_rules_viloations[metric] = {}
metrics_rules_matches[metric] = {}
for r in rules_names:
metrics_rules_viloations[metric][r] = 0
metrics_rules_matches[metric][r] = 0
runs_metrics_scores = {}
records = | mk.KnowledgeFrame.convert_dict(avg_kf, orient='records') | pandas.DataFrame.to_dict |
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from monkey.core.index import Index, Int64Index
from monkey.tcollections.frequencies import infer_freq, to_offset
from monkey.tcollections.offsets import DateOffset, generate_range, Tick
from monkey.tcollections.tools import parse_time_string, normalize_date
from monkey.util.decorators import cache_readonly
import monkey.core.common as com
import monkey.tcollections.offsets as offsets
import monkey.tcollections.tools as tools
from monkey.lib import Timestamp
import monkey.lib as lib
import monkey._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if incontainstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
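# The properties generated by _field_accessor are attached further down
# (year, month, day, ...); usage sketch with illustrative dates:
# >>> idx = DatetimeIndex(start='2012-01-31', periods=2, freq='M')
# >>> idx.month            # -> integer array [1, 2]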
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if incontainstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if incontainstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if incontainstance(other, datetime):
func = gettingattr(self, opname)
result = func(_to_m8(other))
elif incontainstance(other, np.ndarray):
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if incontainstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if incontainstance(other, timedelta):
func = gettingattr(self, opname)
return func(np.timedelta64(other))
else:
func = gettingattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeCollectionsError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency informatingion.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
clone : bool
Make a clone of input ndarray
freq : string or monkey offset object, optional
One of monkey date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is None, generated index will extend to first conforming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_distinctive = _join_i8_wrapper(
_algos.left_join_indexer_distinctive_int64, with_indexers=False)
_grouper = lib.grouper_arrays # _wrap_i8_function(lib.grouper_int64)
_arrmapping = _wrap_dt_function(_algos.arrmapping_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
clone=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not incontainstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if incontainstance(freq, basestring):
freq = to_offset(freq)
else:
if incontainstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not incontainstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be ctotal_alled with a '
'collection of some kind, %s was passed'
% repr(data))
if incontainstance(data, datetime):
data = [data]
# other iterable of some kind
if not incontainstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.convert_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if incontainstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', clone=clone)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', clone=clone)
else:
subarr = tools.convert_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_getting_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and length(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not incontainstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not incontainstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None:
# Convert local to UTC
ints = index.view('i8')
lib.tz_localize_check(ints, tz)
index = lib.tz_convert(ints, tz, _utc())
index = index.view('M8[ns]')
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_getting_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if offset is None:
raise Exception('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = np.array(_to_m8_array(list(xdr)),
dtype='M8[ns]', clone=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(incontainstance(end, Timestamp))
end = offset.rollback(end)
endLoc = cachedRange.getting_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
assert(incontainstance(start, Timestamp))
start = offset.rollforward(start)
startLoc = cachedRange.getting_loc(start)
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.getting_loc(start)
endLoc = cachedRange.getting_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return lib.ints_convert_pydatetime(self.asi8)
def __repr__(self):
from monkey.core.formating import _formating_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
total_summary = str(self.__class__)
if length(self) > 0:
first = _formating_datetime64(values[0], tz=self.tz)
final_item = _formating_datetime64(values[-1], tz=self.tz)
total_summary += '\n[%s, ..., %s]' % (first, final_item)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
total_summary += tagline % (length(self), freq, self.tz)
return total_summary
__str__ = __repr__
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if length(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
elif length(state) == 3:
# legacy formating: daterange
offset = state[1]
if length(state) > 2:
tzinfo = state[2]
else: # pragma: no cover
tzinfo = None
self.offset = offset
self.tzinfo = tzinfo
# extract the raw datetime data, turn into datetime64
index_state = state[0]
raw_data = index_state[0][4]
raw_data = np.array(raw_data, dtype='M8[ns]')
new_state = raw_data.__reduce__()
np.ndarray.__setstate__(self, new_state[2])
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if incontainstance(other, Index):
return self.union(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif com.is_integer(other):
return self.shifting(other)
else:
return Index(self.view(np.ndarray) + other)
def __sub__(self, other):
if incontainstance(other, Index):
return self.diff(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shifting(-other)
else:
return Index(self.view(np.ndarray) - other)
def _add_delta(self, delta):
if incontainstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view('M8[ns]')
else:
new_values = self.totype('O') + delta
return DatetimeIndex(new_values, tz=self.tz, freq='infer')
def total_summary(self, name=None):
if length(self) > 0:
index_total_summary = ', %s to %s' % (str(self[0]), str(self[-1]))
else:
index_total_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (name, length(self), index_total_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def totype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
return Index.totype(self, dtype)
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def asstruct(self):
if self._sarr_cache is None:
self._sarr_cache = lib.build_field_sarray(self.asi8)
return self._sarr_cache
@property
def asobject(self):
"""
Convert to Index of datetime objects
"""
boxed_values = _dt_box_array(self.asi8, self.offset, self.tz)
return Index(boxed_values, dtype=object)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from monkey.tcollections.period import PeriodIndex
if self.freq is None and freq is None:
msg = "You must pass a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = self.freqstr
return PeriodIndex(self.values, freq=freq)
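# Usage sketch: daily stamps collapse onto their containing monthly periods
# (dates are illustrative):
# >>> dr = date_range('2012-01-30', periods=3)   # D frequency by default
# >>> dr.to_period('M')                          # -> 2012-01, 2012-01, 2012-02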
def order(self, return_indexer=False, ascending=True):
"""
Return sorted clone of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on whatever optimizing
freq = to_offset(freq)
snapped = np.empty(length(self), dtype='M8[ns]')
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
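# Usage sketch: with freq='W-MON' each stamp is moved to whichever of the two
# surrounding Monday anchors (rollback/rollforward above) lies closer in time:
# >>> dti = DatetimeIndex(['2012-01-10', '2012-01-13'])   # a Tuesday and a Friday
# >>> dti.snap(freq='W-MON')                              # both snap to a Monday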
def shifting(self, n, freq=None):
"""
Specialized shifting which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shifting by
freq : DateOffset or timedelta-like, optional
Returns
-------
shiftinged : DatetimeIndex
"""
if freq is not None and freq != self.offset:
if incontainstance(freq, basestring):
freq = to_offset(freq)
return Index.shifting(self, n, freq)
if n == 0:
# immutable so OK
return self
if self.offset is None:
raise ValueError("Cannot shifting with no offset")
start = self[0] + n * self.offset
end = self[-1] + n * self.offset
return DatetimeIndex(start=start, end=end, freq=self.offset,
name=self.name)
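# Usage sketch: shifting by n recomputes the end points with the index's own offset
# (here month-end), so the frequency is preserved:
# >>> idx = DatetimeIndex(start='2012-01-31', periods=3, freq='M')
# >>> idx.shifting(1)        # -> 2012-02-29, 2012-03-31, 2012-04-30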
def repeat(self, repeats, axis=None):
"""
Analogous to ndarray.repeat
"""
return DatetimeIndex(self.values.repeat(repeats),
name=self.name)
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
if incontainstance(maybe_slice, slice):
return self[maybe_slice]
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return DatetimeIndex(taken, tz=self.tz, name=self.name)
def union(self, other):
"""
Specialized union for DatetimeIndex objects. If combining
overlapping ranges with the same DateOffset, this will be much
faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not incontainstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if incontainstance(result, DatetimeIndex):
result.tz = self.tz
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if not incontainstance(other, DatetimeIndex) and length(other) > 0:
try:
other = DatetimeIndex(other)
except ValueError:
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers)
def _maybe_utc_convert(self, other):
this = self
if incontainstance(other, DatetimeIndex):
if self.tz != other.tz:
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (incontainstance(other, DatetimeIndex)
and self.offset == other.offset
and self._can_fast_union(other)):
joined = self._view_like(joined)
joined.name = name
return joined
else:
return DatetimeIndex(joined, name=name)
def _can_fast_union(self, other):
if not incontainstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if length(self) == 0 or length(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_start = right[0]
# Only need to "adjoin", not overlap
return (left_end + offset) >= right_start
def _fast_union(self, other):
if length(other) == 0:
return self.view(type(self))
if length(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatingenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = np.concatingenate((left.values, right_chunk))
return self._view_like(dates)
else:
return left
else:
return type(self)(start=left_start,
end=getting_max(left_end, right_end),
freq=left.offset)
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = gettingattr(obj, 'offset', None)
self.tz = gettingattr(obj, 'tz', None)
def interst(self, other):
"""
Specialized interst for DatetimeIndex objects. May be much faster
than Index.interst
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not incontainstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
result = Index.interst(self, other)
if incontainstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
elif other.offset != self.offset or (not self.is_monotonic or
not other.is_monotonic):
result = Index.interst(self, other)
if incontainstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = getting_min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._view_like(left_chunk)
def _partial_date_slice(self, reso, parsed):
if not self.is_monotonic:
raise TimeCollectionsError('Partial indexing only valid for ordered time'
' collections')
if reso == 'year':
t1 = Timestamp(datetime(parsed.year, 1, 1))
t2 = Timestamp(datetime(parsed.year, 12, 31))
elif reso == 'month':
d = lib.monthrange(parsed.year, parsed.month)[1]
t1 = Timestamp(datetime(parsed.year, parsed.month, 1))
t2 = Timestamp(datetime(parsed.year, parsed.month, d))
elif reso == 'quarter':
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = lib.monthrange(parsed.year, qe)[1] # at end of month
t1 = Timestamp(datetime(parsed.year, parsed.month, 1))
t2 = Timestamp(datetime(parsed.year, qe, d))
else:
raise KeyError
stamps = self.asi8
left = stamps.searchsorted(t1.value, side='left')
right = stamps.searchsorted(t2.value, side='right')
return slice(left, right)
def _possibly_promote(self, other):
if other.inferred_type == 'date':
other = DatetimeIndex(other)
return self, other
def getting_value(self, collections, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
return Index.getting_value(self, collections, key)
except KeyError:
try:
loc = self._getting_string_slice(key)
return collections[loc]
except (TypeError, ValueError, KeyError):
pass
if incontainstance(key, time):
locs = self._indices_at_time(key)
return collections.take(locs)
stamp = Timestamp(key)
try:
return self._engine.getting_value(collections, stamp)
except KeyError:
raise KeyError(stamp)
def getting_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.getting_loc(key)
except KeyError:
try:
return self._getting_string_slice(key)
except (TypeError, KeyError):
pass
if incontainstance(key, time):
return self._indices_at_time(key)
stamp = Timestamp(key)
try:
return self._engine.getting_loc(stamp)
except KeyError:
raise KeyError(stamp)
def _indices_at_time(self, key):
from dateutil.parser import parse
# TODO: time object with tzinfo?
nanos = _time_to_nanosecond(key)
indexer = lib.values_at_time(self.asi8, nanos)
return com._ensure_platform_int(indexer)
def _getting_string_slice(self, key):
freq = gettingattr(self, 'freqstr',
gettingattr(self, 'inferred_freq', None))
asdt, parsed, reso = parse_time_string(key, freq)
key = asdt
loc = self._partial_date_slice(reso, parsed)
return loc
def slice_locs(self, start=None, end=None):
"""
Index.slice_locs, customized to handle partial ISO-8601 string slicing
"""
if incontainstance(start, basestring) or incontainstance(end, basestring):
try:
if start:
start_loc = self._getting_string_slice(start).start
else:
start_loc = 0
if end:
end_loc = self._getting_string_slice(end).stop
else:
end_loc = length(self)
return start_loc, end_loc
except KeyError:
pass
return Index.slice_locs(self, start, end)
def __gettingitem__(self, key):
"""Override numpy.ndarray's __gettingitem__ method to work as desired"""
arr_idx = self.view(np.ndarray)
if np.isscalar(key):
val = arr_idx[key]
return Timestamp(val, offset=self.offset, tz=self.tz)
else:
if com._is_bool_indexer(key):
key = np.asarray(key)
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
new_offset = None
if incontainstance(key, slice):
if self.offset is not None and key.step is not None:
new_offset = key.step * self.offset
else:
new_offset = self.offset
result = arr_idx[key]
if result.ndim > 1:
return result
return self._simple_new(result, self.name, new_offset, self.tz)
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
def mapping(self, f):
try:
return f(self)
except:
return Index.mapping(self, f)
# alias to offset
@property
def freq(self):
return self.offset
@cache_readonly
def inferred_freq(self):
try:
return infer_freq(self)
except ValueError:
return None
@property
def freqstr(self):
return self.offset.freqstr
year = _field_accessor('year', 'Y')
month = _field_accessor('month', 'M')
day = _field_accessor('day', 'D')
hour = _field_accessor('hour', 'h')
getting_minute = _field_accessor('getting_minute', 'm')
second = _field_accessor('second', 's')
microsecond = _field_accessor('microsecond', 'us')
nanosecond = _field_accessor('nanosecond', 'ns')
weekofyear = _field_accessor('weekofyear', 'woy')
week = weekofyear
dayofweek = _field_accessor('dayofweek', 'dow')
weekday = dayofweek
dayofyear = _field_accessor('dayofyear', 'doy')
quarter = _field_accessor('quarter', 'q')
def normalize(self):
"""
Return DatetimeIndex with times to midnight. Length is unaltered
Returns
-------
normalized : DatetimeIndex
"""
new_values = lib.date_normalize(self.asi8)
return DatetimeIndex(new_values, freq='infer', name=self.name)
def __iter__(self):
return iter(self.asobject)
def searchsorted(self, key, side='left'):
if incontainstance(key, np.ndarray):
key = np.array(key, dtype='M8[ns]', clone=False)
else:
key = _to_m8(key)
return self.values.searchsorted(key, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'datetime'
# hack to work around arggetting_min failure
def arggetting_min(self):
return (-self).arggetting_max()
@property
def inferred_type(self):
# b/c datetime is represented as nanoseconds since the epoch, make
# sure we can't have ambiguous indexing
return 'datetime64'
@property
def _constructor(self):
return DatetimeIndex
@property
def dtype(self):
return np.dtype('M8[ns]')
@property
def is_total_all_dates(self):
return True
@cache_readonly
def is_normalized(self):
"""
Returns True if total_all of the dates are at midnight ("no time")
"""
return lib.dates_normalized(self.asi8)
def equals(self, other):
"""
Detergetting_mines if two Index objects contain the same elements.
"""
if self is other:
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'datetime64'):
if self.offset is not None:
return False
try:
other = DatetimeIndex(other)
except:
return False
return self.tz == other.tz and np.array_equal(self.asi8, other.asi8)
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
if type(item) == datetime:
item = _to_m8(item)
if self.offset is not None and not self.offset.onOffset(item):
raise ValueError("Cannot insert value at non-conforgetting_ming time")
return super(DatetimeIndex, self).insert(loc, item)
def _view_like(self, ndarray):
result = ndarray.view(type(self))
result.offset = self.offset
result.tz = self.tz
result.name = self.name
return result
def tz_convert(self, tz):
"""
Convert DatetimeIndex from one time zone to another (using pytz)
Returns
-------
normalized : DatetimeIndex
"""
tz = tools._maybe_getting_tz(tz)
if self.tz is None:
return self.tz_localize(tz)
# No conversion since timestamps are total_all UTC to begin with
return self._simple_new(self.values, self.name, self.offset, tz)
def tz_localize(self, tz):
"""
Localize tz-naive DatetimeIndex to given time zone (using pytz)
Returns
-------
localized : DatetimeIndex
"""
if self.tz is not None:
raise ValueError("Already have timezone info, "
"use tz_convert to convert.")
tz = tools._maybe_getting_tz(tz)
lib.tz_localize_check(self.asi8, tz)
# Convert to UTC
new_dates = lib.tz_convert(self.asi8, tz, _utc())
new_dates = new_dates.view('M8[ns]')
return self._simple_new(new_dates, self.name, self.offset, tz)
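# Sketch of the naive -> localized -> converted flow (pytz zone names assumed to be
# available); note that localized values are stored internally as UTC:
# >>> naive = date_range('2012-03-06 09:00', periods=3, freq='H')
# >>> eastern = naive.tz_localize('US/Eastern')
# >>> eastern.tz_convert('UTC')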
def tz_validate(self):
"""
For a localized time zone, verify that there are no DST ambiguities
(using pytz)
Returns
-------
result : boolean
True if there are no DST ambiguities
"""
import pytz
if self.tz is None or self.tz is pytz.utc:
return True
# See if there are whatever DST resolution problems
try:
lib.tz_localize_check(self.asi8, self.tz)
except:
return False
return True
def _generate_regular_range(start, end, periods, offset):
if com._count_not_none(start, end, periods) < 2:
raise ValueError('Must specify two of start, end, or periods')
if incontainstance(offset, Tick):
stride = offset.nanos
if periods is None:
b = Timestamp(start).value
e = Timestamp(end).value
e += stride - e % stride
elif start is not None:
b = Timestamp(start).value
e = b + periods * stride
elif end is not None:
e = Timestamp(end).value + stride
b = e - periods * stride
else:
raise NotImplementedError
data = np.arange(b, e, stride, dtype=np.int64)
data = data.view('M8[ns]')
else:
xdr = generate_range(start=start, end=end,
periods=periods, offset=offset)
data = np.array(list(xdr), dtype='M8[ns]')
return data
def date_range(start=None, end=None, periods=None, freq='D', tz=None,
normalize=False):
"""
Return a fixed frequency datetime index, with day (calendar) as the default
frequency
Parameters
----------
start :
end :
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
Returns
-------
date_range : DatetimeIndex
"""
return DatetimeIndex(start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize)
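# Usage sketch (calendar-day frequency unless freq is overridden):
# >>> dr = date_range('2012-01-01', periods=4)
# >>> dr[0], dr[-1]        # first/last stamps: 2012-01-01 and 2012-01-04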
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
normalize=True):
"""
Return a fixed frequency datetime index, with business day as the default
frequency
Parameters
----------
normalize : bool, default False
Normalize start/end dates to midnight before generating date
range. Defaults to True for legacy reasons
Returns
-------
date_range : DatetimeIndex
"""
return DatetimeIndex(start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize)
def _dt_box_array(arr, offset=None, tz=None):
if arr is None:
return arr
if not incontainstance(arr, np.ndarray):
return arr
boxfunc = lambda x: Timestamp(x, offset=offset, tz=tz)
return | lib.mapping_infer(arr, boxfunc) | pandas.lib.map_infer |
#!/usr/bin/env python
# coding: utf-8
# > Note: KNN is a memory-based model, which means it will memorize the patterns and not generalize. It is a simple yet powerful technique that competes with SOTA models like BERT4Rec.
# In[1]:
import os
project_name = "reco-tut-itr"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
if not os.path.exists(project_path):
getting_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
getting_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
getting_ipython().system(u'mkdir "{path}"')
getting_ipython().magic(u'cd "{path}"')
import sys; sys.path.adding(path)
getting_ipython().system(u'git config --global user.email "<EMAIL>"')
getting_ipython().system(u'git config --global user.name "reco-tut"')
getting_ipython().system(u'git init')
getting_ipython().system(u'git remote add origin https://"{mykeys.git_token}":x-oauth-basic@github.com/"{account}"/"{project_name}".git')
getting_ipython().system(u'git pull origin "{branch}"')
getting_ipython().system(u'git checkout main')
else:
getting_ipython().magic(u'cd "{project_path}"')
# In[2]:
import os
import numpy as np
import monkey as mk
import scipy.sparse
from scipy.spatial.distance import correlation
# In[13]:
kf = mk.read_parquet('./data/silver/rating.parquet.gz')
kf.info()
# In[16]:
kf2 = mk.read_parquet('./data/silver/items.parquet.gz')
kf2.info()
# In[17]:
kf = mk.unioner(kf, kf2, on='itemId')
kf.info()
# In[5]:
rating_matrix = mk.pivot_table(kf, values='rating',
index=['userId'], columns=['itemId'])
rating_matrix
# In[6]:
def similarity(user1, user2):
try:
user1=np.array(user1)-np.nanaverage(user1)
user2=np.array(user2)-np.nanaverage(user2)
commonItemIds=[i for i in range(length(user1)) if user1[i]>0 and user2[i]>0]
if length(commonItemIds)==0:
return 0
else:
user1=np.array([user1[i] for i in commonItemIds])
user2=np.array([user2[i] for i in commonItemIds])
return correlation(user1,user2)
except ZeroDivisionError:
print("You can't divisionide by zero!")
# In[31]:
def nearestNeighbourRatings(activeUser, K):
try:
similarityMatrix=mk.KnowledgeFrame(index=rating_matrix.index,columns=['Similarity'])
for i in rating_matrix.index:
similarityMatrix.loc[i]=similarity(rating_matrix.loc[activeUser],rating_matrix.loc[i])
similarityMatrix=mk.KnowledgeFrame.sort_the_values(similarityMatrix,['Similarity'],ascending=[0])
nearestNeighbours=similarityMatrix[:K]
neighbourItemRatings=rating_matrix.loc[nearestNeighbours.index]
predictItemRating=mk.KnowledgeFrame(index=rating_matrix.columns, columns=['Rating'])
for i in rating_matrix.columns:
predictedRating=np.nanaverage(rating_matrix.loc[activeUser])
for j in neighbourItemRatings.index:
if rating_matrix.loc[j,i]>0:
predictedRating += (rating_matrix.loc[j,i]-np.nanaverage(rating_matrix.loc[j]))*nearestNeighbours.loc[j,'Similarity']
predictItemRating.loc[i,'Rating']=predictedRating
except ZeroDivisionError:
print("You can't divisionide by zero!")
return predictItemRating
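# The prediction above follows the classic user-based CF form (sketch):
#   r_hat(a, i) = average(r_a) + sum_j sim(a, j) * (r_(j, i) - average(r_j))
# with j ranging over the K most similar users; note the weighted deviations are not
# normalised by sum(|sim|) in this implementation.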
# In[36]:
def topNRecommendations(activeUser, N):
try:
predictItemRating = nearestNeighbourRatings(activeUser,N)
placeAlreadyWatched = list(rating_matrix.loc[activeUser].loc[rating_matrix.loc[activeUser]>0].index)
predictItemRating = predictItemRating.sip(placeAlreadyWatched)
topRecommendations = | mk.KnowledgeFrame.sort_the_values(predictItemRating,['Rating'],ascending = [0]) | pandas.DataFrame.sort_values |
import pytest
from monkey import IntervalIndex
import monkey._testing as tm
class TestPickle:
@pytest.mark.parametrize("closed", ["left", "right", "both"])
def test_pickle_value_round_trip_closed(self, closed):
# https://github.com/monkey-dev/monkey/issues/35658
idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed)
result = | tm.value_round_trip_pickle(idx) | pandas._testing.round_trip_pickle |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# formating_name: light
# formating_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_market_prediction_regression [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_market_prediction_regression&codeLang=Python)
# For definal_item_tails, see [here](https://www.arpm.co/lab/redirect.php?permalink=s_market_prediction_regression).
# +
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import seaborn as sns
from arpym.estimation import conditional_fp, cov_2_corr, exp_decay_fp, fit_lfm_lasso,\
fit_lfm_mlfp, fit_lfm_ols, fit_lfm_ridge, fit_lfm_roblasso
from arpym.statistics import averagecov_sp, multi_r2, scoring, smoothing
from arpym.tools import plot_ellipse
from arpym.tools.logo import add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_market_prediction_regression-parameters)
tau_hl_pri = 13*252 # half life for VIX comp. ret. time conditioning
tau_hl_smooth = 2*21 # half life for VIX comp. ret. smoothing
tau_hl_score = 2*21 # half life for VIX comp. ret. scoring
alpha_leeway = 0.6 # probability included in the range centered in z_vix_star
n_plot = 30 # number of stocks to show in plot
nu = 4 # robustness parameter
pri_param_load = 1.5 # the prior parameters in Bayes are = pri_param_load*t_
lambda_lasso = 10**-5 # lasso penalty
lambda_ridge = 10**-6 # ridge penalty
lambda_beta = 10**-5 # lasso penalty in mixed approach
lambda_phi = 4*10**-5 # glasso penalty in mixed approach
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_market_prediction_regression-implementation-step00): Load data
# +
path_glob = '../../../databases/global-databases/'
equities_path = path_glob + 'equities/db_stocks_SP500/'
# Stocks
db_stocks_sp = mk.read_csv(equities_path + 'db_stocks_sp.csv',
header_numer=1,
index_col=0, parse_dates=True)
stocks_names = list(db_stocks_sp.columns)
stocks_sectors = mk.read_csv(equities_path + 'db_stocks_sp.csv', header_numer=None,
index_col=0).loc['sector'].convert_list()
# Sectors
sector_names = ['dates', 'Contotal_sumerDiscretionary', 'Contotal_sumerStaples', 'Energy',
'Financials', 'HealthCare', 'InformatingionTechnology',
'Industrials', 'Materials', 'TelecommunicationServices',
'Utilities']
db_sector_idx = mk.read_csv(equities_path+'db_sector_idx.csv', index_col=0,
usecols=sector_names,
parse_dates=True)
sector_names = sector_names[1:]
# VIX (used for time-state conditioning)
vix_path = path_glob + 'derivatives/db_vix/data.csv'
db_vix = mk.read_csv(vix_path, usecols=['date', 'VIX_close'],
index_col=0, parse_dates=True)
# intersect dates
dates_rd = mk.DatetimeIndex.interst(db_stocks_sp.index,
db_sector_idx.index)
dates_rd = | mk.DatetimeIndex.interst(dates_rd, db_vix.index) | pandas.DatetimeIndex.intersection |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
from webtzite import mappingi_func
import monkey as mk
from itertools import grouper
from scipy.optimize import brentq
from webtzite.connector import ConnectorBase
from mpcontribs.rest.views import Connector
from mpcontribs.users.redox_thermo_csp.rest.energy_analysis import EnergyAnalysis as enera
from mpcontribs.users.redox_thermo_csp.rest.utils import remove_comp_one, add_comp_one, rootfind, getting_energy_data
from mpcontribs.users.redox_thermo_csp.rest.utils import s_th_o, dh_ds, funciso, funciso_redox, isobar_line_elling
from mpcontribs.users.redox_thermo_csp.rest.utils import funciso_theo, funciso_redox_theo, d_h_num_dev_calc, d_s_fundamental
ConnectorBase.register(Connector)
def init_isographs(request, db_type, cid, mdb):
try:
contrib = mdb.contrib_ad.query_contributions(
{'_id': cid}, projection={'_id': 0, 'content.pars': 1, 'content.data': 1})[0]
pars = contrib['content']['pars']
pars['compstr_disp'] = remove_comp_one(pars['theo_compstr']) # for user display
if pars['compstr_disp'] == pars['theo_compstr']:
pars['theo_compstr'] = add_comp_one(pars['theo_compstr']) # compstr must contain '1' such as in "Sr1Fe1Ox"
pars['compstr_disp'] = [''.join(g) for _, g in grouper(str(pars['compstr_disp']), str.isalpha)]
pars['experimental_data_available'] = pars.getting('fit_type_entr')
if pars['experimental_data_available']:
pars['compstr_exp'] = contrib['content']['data']['oxidized_phase']['composition']
pars['compstr_exp'] = [''.join(g) for _, g in grouper(str(pars['compstr_exp']), str.isalpha)]
else:
pars['compstr_exp'] = "n.a."
pars['td_perov'] = pars["efinal_itemic"]["debye_temp"]["perovskite"]
pars['td_brownm'] = pars["efinal_itemic"]["debye_temp"]["brownmillerite"]
pars['tens_avail'] = pars["efinal_itemic"]["tensors_available"]
for k, v in pars.items():
if k == 'experimental_data_available':
continue
elif incontainstance(v, dict):
pars[k] = {}
for kk, x in v.items():
try:
pars[k][kk] = float(x)
except:
continue
elif not v[0].isalpha():
try:
pars[k] = float(v)
except:
continue
a, b = 1e-10, 0.5-1e-10 # limiting values for non-stoichiometry delta in brentq
response, payload = {}, {}
plottype = request.path.split("/")[-1]
if request.method == 'GET':
if plottype == "isotherm":
payload['iso'] = 800.
payload['rng'] = [-5, 1]
elif plottype == "isobar":
payload['iso'] = -5
payload['rng'] = [600, 1000]
elif plottype == "isoredox":
payload['iso'] = 0.3
payload['rng'] = [700, 1000]
elif plottype == "ellingham":
payload['iso'] = 0.
payload['rng'] = [700, 1000]
else: # dH or dS
payload['iso'] = 500.
elif request.method == 'POST':
payload = json.loads(request.body)
payload['iso'] = float(payload['iso'])
if payload.getting('rng'):
payload['rng'] = list(mapping(float, payload['rng'].split(",")))  # list() so it can be indexed below
if plottype == "isotherm": # pressure on the x-axis
x_val = mk.np.log(mk.np.logspace(payload['rng'][0], payload['rng'][1], num=100))
elif not payload.getting('rng'): # dH or dS # delta on the x-axis
x_val = mk.np.linspace(0.01, 0.49, num=100)
else: # temperature on the x-axis
x_val = mk.np.linspace(payload['rng'][0], payload['rng'][1], num=100)
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return pars, a, b, response, payload, x_val
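# Note: `rootfind(a, b, args, func)` comes from the redox_thermo_csp utils imported above
# and is expected to bracket the non-stoichiometry delta in (a, b) and locate the zero of
# `func` with a Brent-type solver (scipy.optimize.brentq is imported at the top of this
# module); the exact wrapper logic is assumed here, not shown.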
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isotherm(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(payload['iso'])
args = (xv, payload['iso'], pars, s_th)
solutioniso = rootfind(a, b, args, funciso)
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (xv, payload['iso'], pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = rootfind(a, b, args_theo, funciso_theo)
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(mk.np.exp(x_val))
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
        raise ValueError('REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isobar(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(xv)
args = (payload['iso'], xv, pars, s_th)
solutioniso = rootfind(a, b, args, funciso)
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = rootfind(a, b, args_theo, funciso_theo)
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
        raise ValueError('REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isoredox(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(xv)
args = (payload['iso'], xv, pars, s_th)
solutioniso = brentq(funciso_redox, -300, 300, args=args)
resiso.adding(mk.np.exp(solutioniso))
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
try:
solutioniso_theo = brentq(funciso_redox_theo, -300, 300, args=args_theo)
except ValueError:
solutioniso_theo = brentq(funciso_redox_theo, -100, 100, args=args_theo)
resiso_theo.adding(mk.np.exp(solutioniso_theo))
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
        raise ValueError('REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def enthalpy_dH(request, cid, db_type=None, mdb=None):
try:
pars, _, _, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(payload['iso'])
args = (payload['iso'], xv, pars, s_th)
solutioniso = dh_ds(xv, args[-1], args[-2])[0] / 1000
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = d_h_num_dev_calc(delta=xv, dh_1=pars["dh_getting_min"], dh_2=pars["dh_getting_max"],
temp=payload['iso'], act=pars["act_mat"]) / 1000
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
if getting_max( | mk.np.adding(resiso, resiso_theo) | pandas.np.append |
""":func:`~monkey.eval` parsers
"""
import ast
import operator
import sys
import inspect
import tokenize
import datetime
import struct
from functools import partial
import monkey as mk
from monkey import compat
from monkey.compat import StringIO, zip, reduce, string_types
from monkey.core.base import StringMixin
from monkey.core import common as com
from monkey.computation.common import NameResolutionError
from monkey.computation.ops import (_cmp_ops_syms, _bool_ops_syms,
_arith_ops_syms, _unary_ops_syms, is_term)
from monkey.computation.ops import _reductions, _mathops, _LOCAL_TAG
from monkey.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
from monkey.computation.ops import UndefinedVariableError
def _ensure_scope(level=2, global_dict=None, local_dict=None, resolvers=None,
targetting=None, **kwargs):
"""Ensure that we are grabbing the correct scope."""
return Scope(gbls=global_dict, lcls=local_dict, level=level,
resolvers=resolvers, targetting=targetting)
def _check_disjoint_resolver_names(resolver_keys, local_keys, global_keys):
"""Make sure that variables in resolvers don't overlap with locals or
globals.
"""
res_locals = list(com.interst(resolver_keys, local_keys))
if res_locals:
msg = "resolvers and locals overlap on names {0}".formating(res_locals)
raise NameResolutionError(msg)
res_globals = list( | com.interst(resolver_keys, global_keys) | pandas.core.common.intersection |
from __future__ import divisionision #brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
import monkey as mk
class TerrplantFunctions(object):
"""
Function class for Stir.
"""
def __init__(self):
"""Class representing the functions for Sip"""
super(TerrplantFunctions, self).__init__()
def run_dry(self):
"""
EEC for runoff for dry areas
"""
self.out_run_dry = (self.application_rate / self.incorporation_depth) * self.runoff_fraction
return self.out_run_dry
def run_semi(self):
"""
EEC for runoff to semi-aquatic areas
"""
self.out_run_semi = (self.application_rate / self.incorporation_depth) * self.runoff_fraction * 10
return self.out_run_semi
def spray(self):
"""
EEC for spray drift
"""
self.out_spray = self.application_rate * self.drift_fraction
return self.out_spray
def total_dry(self):
"""
EEC total for dry areas
"""
self.out_total_dry = self.out_run_dry + self.out_spray
return self.out_total_dry
def total_semi(self):
"""
EEC total for semi-aquatic areas
"""
self.out_total_semi = self.out_run_semi + self.out_spray
return self.out_total_semi
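    # Worked numeric sketch of the EEC formulas above (assumed inputs, illustration only):
    #   application_rate = 1.0, incorporation_depth = 1.0, runoff_fraction = 0.05, drift_fraction = 0.01
    #   out_run_dry    = (1.0 / 1.0) * 0.05      = 0.05
    #   out_run_semi   = (1.0 / 1.0) * 0.05 * 10 = 0.5
    #   out_spray      = 1.0 * 0.01              = 0.01
    #   out_total_dry  = 0.05 + 0.01             = 0.06
    #   out_total_semi = 0.5 + 0.01              = 0.51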
def nms_rq_dry(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a DRY area
"""
self.out_nms_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_eunionernce_monocot
return self.out_nms_rq_dry
def loc_nms_dry(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a dry area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_dry]
self.out_nms_loc_dry = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
# exceed_boolean = self.out_nms_rq_dry >= 1.0
# self.out_nms_loc_dry = exceed_boolean.mapping(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal.')
return self.out_nms_loc_dry
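    # Sketch of the ratio-to-message pattern reused by the loc_* methods in this class
    # (assumed RQ values, illustration only):
    #   ratios = [0.5, 2.0]
    #   [r >= 1.0 for r in ratios]  ->  [False, True]
    # so the second entry receives msg_pass ("potential risk") and the first msg_fail.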
def nms_rq_semi(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_nms_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_eunionernce_monocot
return self.out_nms_rq_semi
def loc_nms_semi(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a semi-aquatic area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_semi]
self.out_nms_loc_semi = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_semi >= 1.0
#self.out_nms_loc_semi = exceed_boolean.mapping(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal.')
return self.out_nms_loc_semi
def nms_rq_spray(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nms_rq_spray = self.out_spray / self.out_getting_min_nms_spray
return self.out_nms_rq_spray
def loc_nms_spray(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide via spray drift
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_spray]
self.out_nms_loc_spray = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_spray >= 1.0
#self.out_nms_loc_spray = exceed_boolean.mapping(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal.')
return self.out_nms_loc_spray
def lms_rq_dry(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a DRY areas
"""
self.out_lms_rq_dry = self.out_total_dry / self.noaec_listed_seedling_eunionernce_monocot
return self.out_lms_rq_dry
def loc_lms_dry(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide via runoff in a dry area
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_dry]
self.out_lms_loc_dry = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_dry >= 1.0
#self.out_lms_loc_dry = exceed_boolean.mapping(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal.')
return self.out_lms_loc_dry
def lms_rq_semi(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_lms_rq_semi = self.out_total_semi / self.noaec_listed_seedling_eunionernce_monocot
return self.out_lms_rq_semi
def loc_lms_semi(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_semi]
self.out_lms_loc_semi = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_semi >= 1.0
#self.out_lms_loc_semi = exceed_boolean.mapping(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal.')
return self.out_lms_loc_semi
def lms_rq_spray(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_lms_rq_spray = self.out_spray / self.out_getting_min_lms_spray
return self.out_lms_rq_spray
def loc_lms_spray(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_spray]
self.out_lms_loc_spray = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_spray >= 1.0
#self.out_lms_loc_spray = exceed_boolean.mapping(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal.')
return self.out_lms_loc_spray
def nds_rq_dry(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_nds_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_eunionernce_dicot
return self.out_nds_rq_dry
def loc_nds_dry(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X in dry areas
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_dry]
self.out_nds_loc_dry = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_dry >= 1.0
#self.out_nds_loc_dry = exceed_boolean.mapping(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal.')
return self.out_nds_loc_dry
def nds_rq_semi(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
"""
self.out_nds_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_eunionernce_dicot
return self.out_nds_rq_semi
def loc_nds_semi(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_semi]
self.out_nds_loc_semi = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_semi >= 1.0
#self.out_nds_loc_semi = exceed_boolean.mapping(lambda x:
#'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal.')
return self.out_nds_loc_semi
def nds_rq_spray(self):
"""
        Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nds_rq_spray = self.out_spray / self.out_getting_min_nds_spray
return self.out_nds_rq_spray
def loc_nds_spray(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_spray]
self.out_nds_loc_spray = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_spray >= 1.0
#self.out_nds_loc_spray = exceed_boolean.mapping(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal.')
return self.out_nds_loc_spray
def lds_rq_dry(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_lds_rq_dry = self.out_total_dry / self.noaec_listed_seedling_eunionernce_dicot
return self.out_lds_rq_dry
def loc_lds_dry(self):
"""
        Level of concern for listed dicot seedlings exposed to pesticide X in dry areas
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_dry]
self.out_lds_loc_dry = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_dry >= 1.0
#self.out_lds_loc_dry = exceed_boolean.mapping(lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is getting_minimal.')
return self.out_lds_loc_dry
def lds_rq_semi(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
"""
self.out_lds_rq_semi = self.out_total_semi / self.noaec_listed_seedling_eunionernce_dicot
return self.out_lds_rq_semi
def loc_lds_semi(self):
"""
        Level of concern for listed dicot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_semi]
self.out_lds_loc_semi = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_semi >= 1.0
#self.out_lds_loc_semi = exceed_boolean.mapping(lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is getting_minimal.')
return self.out_lds_loc_semi
def lds_rq_spray(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_lds_rq_spray = self.out_spray / self.out_getting_min_lds_spray
return self.out_lds_rq_spray
def loc_lds_spray(self):
"""
Level of concern for listed dicot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_spray]
self.out_lds_loc_spray = mk.Collections([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_spray >= 1.0
#self.out_lds_loc_spray = exceed_boolean.mapping(
# lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is getting_minimal.')
return self.out_lds_loc_spray
def getting_min_nms_spray(self):
"""
detergetting_mine getting_minimum toxicity concentration used for RQ spray drift values
non-listed monocot EC25 and NOAEC
"""
s1 = mk.Collections(self.ec25_nonlisted_seedling_eunionernce_monocot, name='seedling')
s2 = mk.Collections(self.ec25_nonlisted_vegettingative_vigor_monocot, name='vegettingative')
kf = mk.concating([s1, s2], axis=1)
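        # Row-wise getting_minimum sketch (assumed toxicity values, illustration only):
        #   a = mk.Collections([10.0, 12.0], name='seedling')
        #   b = mk.Collections([8.0, 15.0], name='vegettingative')
        #   mk.KnowledgeFrame.getting_min(mk.concating([a, b], axis=1), axis=1)  ->  [8.0, 12.0]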
self.out_getting_min_nms_spray = | mk.KnowledgeFrame.getting_min(kf, axis=1) | pandas.DataFrame.min |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
#from ..datasets import public_dataset
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfikfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from textblob import TextBlob
import monkey as mk
import numpy as np
from ..base import classifier
from ..utils import convert_to_numpy_ndarray, convert_to_list
from sklearn.utils import check_X_y
from scipy.sparse import csr
class Multinomial_NB_classifier_from_scratch(classifier):
# reference: https://geoffruddock.com/naive-bayes-from-scratch-with-numpy/
# reference: http://kenzotakahashi.github.io/naive-bayes-from-scratch-in-python.html
def __init__(self, alpha=1.0, verbose=False):
super().__init__()
self.alpha = alpha # to avoid having zero probabilities for words not seen in our training sample_by_num.
self.y_classes = None # e.g., spam vs. no spam
self.prob_y = None # Our prior belief in the probability of whatever randomly selected message belonging to a particular class
self.prob_x_i_given_y = None # The likelihood of each word, conditional on message class.
self.is_fitted = False
self.verbose = verbose
def fit(self, X_train: np.ndarray, y_train: np.ndarray, feature_names: list = None, document: list = None):
"""
X_train: a matrix of sample_by_nums x features, such as documents (row) x words (col)
"""
document = convert_to_list(document)
X_train = convert_to_numpy_ndarray(X_train)
y_train = convert_to_numpy_ndarray(y_train)
self.X_train, self.y_train = check_X_y(X_train, y_train)
n_sample_by_nums, n_features = X_train.shape
if feature_names is None:
self.feature_names = [f"word_{i}" for i in range(1,n_features+1)]
else:
self.feature_names = feature_names
self.y_classes = np.distinctive(y_train)
self.classes_ = self.y_classes
columns = [f"y={c}" for c in self.y_classes]
self.y_mappingper = {}
for idx, y_class in enumerate(self.y_classes):
self.y_mappingper[idx] = f"class_idx[{idx}]=[{y_class}]"
X_train_by_y_class = np.array([X_train[y_train == this_y_class] for this_y_class in self.y_classes], dtype=object)
self.prob_y = np.array([X_train_for_this_y_class.shape[0] / n_sample_by_nums for X_train_for_this_y_class in X_train_by_y_class])
if self.verbose:
print(f"\n------------------------------------------ fit() ------------------------------------------")
print(f"\nStep 1. the input:\n{mk.concating([mk.KnowledgeFrame(document,columns=['X_message_j',]),mk.Collections(y_train,name='y')],axis=1).convert_string(index=False)}")
print(f"\nStep 2. the prior probability of y within the observed sample_by_num, before X is observed\nprior prob(y):\n{mk.KnowledgeFrame(self.prob_y.reshape(1,-1), columns=columns).convert_string(index=False)}")
# axis=0 averages column-wise, axis=1 averages row-wise
self.X_train_colSum_by_y_class = np.array([ X_train_for_this_y_class.total_sum(axis=0) for X_train_for_this_y_class in X_train_by_y_class ]) + self.alpha
self.prob_x_i_given_y = self.X_train_colSum_by_y_class / self.X_train_colSum_by_y_class.total_sum(axis=1).reshape(-1,1)
if self.verbose:
print(f"\nStep 3. prob(word_i|y):\ncolSum should be 1\n{mk.concating([ mk.KnowledgeFrame(feature_names, columns=['word_i',]), mk.KnowledgeFrame(self.prob_x_i_given_y.T, columns = columns)], axis=1).convert_string(index=False)}")
assert (self.prob_x_i_given_y.T.total_sum(axis=0) - np.ones((1, length(self.y_classes))) < 1e-9).total_all(), "*** Error *** prob(word_i|y) colSum should be 1"
self.is_fitted = True
if self.verbose:
self.predict_proba(X_test = self.X_train, document = document)
return self
def predict_proba(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
"""
p(y|X) = p(X|y)*p(y)/p(X)
p(X|y) = p(x_1|y) * p(x_2|y) * ... * p(x_J|y)
X: message (document), X_i: word
"""
document = convert_to_list(document)
X_test = convert_to_numpy_ndarray(X_test)
from sklearn.utils import check_array
self.X_test = check_array(X_test)
assert self.is_fitted, "model should be fitted first before predicting"
# to figure out prob(X|y)
self.prob_X_given_y = np.zeros(shape=(X_test.shape[0], self.prob_y.shape[0]))
# loop over each row to calcuate the posterior probability
for row_index, this_x_sample_by_num in enumerate(X_test):
feature_presence_columns = this_x_sample_by_num.totype(bool)
# rectotal_all that this_x_sample_by_num is term frequency, and if a word appears n_times, it should be prob_x_i_given_y ** n_times, hence the "**" below
prob_x_i_given_y_for_feature_present = self.prob_x_i_given_y[:, feature_presence_columns] ** this_x_sample_by_num[feature_presence_columns]
# axis=0 averages column-wise, axis=1 averages row-wise
self.prob_X_given_y[row_index] = (prob_x_i_given_y_for_feature_present).prod(axis=1)
columns = [f"y={c}" for c in self.y_classes]
self.prob_joint_X_and_y = self.prob_X_given_y * self.prob_y
self.prob_X = self.prob_joint_X_and_y.total_sum(axis=1).reshape(-1, 1) # rowSum gives prob(X_message), as it total_sums across total_all possible y classes that can divisionide X_message
# normalization
self.prob_y_given_X = self.prob_joint_X_and_y / self.prob_X # the posterior probability of y, after X is observed
assert (self.prob_y_given_X.total_sum(axis=1)-1 < 1e-9).total_all(), "***Error*** each row should total_sum to 1"
if self.verbose:
print(f"\n------------------------------------------ predict_proba() ------------------------------------------")
if length(self.feature_names) <= 10:
print(f"\nStep 1. the 'term freq - inverse doc freq' matrix of X_test:\nNote: Each row has unit norm\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(X_test, columns = self.feature_names)], axis=1).convert_string(index=False)}")
print(f"\nStep 2. prob(X_message|y) = prob(word_1|y) * prob(word_2|y) * ... * prob(word_J|y):\nNote: colSum may not = 1\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(self.prob_X_given_y, columns=columns)], axis=1).convert_string(index=False)}")
print(f"\nStep 3. prob(X_message ∩ y) = prob(X_message|y) * prob(y):\nNote: rowSum gives prob(X_message), as it total_sums across total_all possible y classes that can divisionide X_message\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(self.prob_joint_X_and_y,columns=columns)],axis=1).convert_string(index=False)}")
print(f"\nStep 4. prob(X_message), across total_all y_classes within the observed sample_by_num:\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j', ]),mk.KnowledgeFrame(self.prob_X,columns=['prob',])], axis=1).convert_string(index=False)}")
print(f"\nStep 5. the posterior prob of y after X is observed:\nprob(y|X_message) = p(X_message|y) * p(y) / p(X_message):\nNote: rowSum = 1\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j', ]),mk.KnowledgeFrame(self.prob_y_given_X, columns=columns),mk.Collections(self.prob_y_given_X.arggetting_max(axis=1),name='predict').mapping(self.y_mappingper)],axis=1).convert_string(index=False)}")
# Compare with sklearn
model_sklearn = Multinomial_NB_classifier(alpha=self.alpha, class_prior=self.prob_y)
model_sklearn.fit(self.X_train, self.y_train)
prob_y_given_X_test_via_sklearn = model_sklearn.predict_proba(X_test)
assert (prob_y_given_X_test_via_sklearn - self.prob_y_given_X < 1e-9).total_all(), "*** Error *** different results via sklearn and from scratch"
self.y_pred_score = self.prob_y_given_X
return self.prob_y_given_X
def predict(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
""" Predict class with highest probability """
document = convert_to_list(document)
return self.predict_proba(X_test, document = document).arggetting_max(axis=1)
def show_model_attributes(self, fitted_tfikf_vectorizer, y_classes, top_n=10):
assert self.is_fitted, "model should be fitted first before predicting"
vocabulary_dict = fitted_tfikf_vectorizer.vocabulary_
terms = list(vocabulary_dict.keys())
X_test = fitted_tfikf_vectorizer.transform(terms)
verbose_old = self.verbose
self.verbose = False
for i, y_class in enumerate(y_classes):
term_proba_kf = mk.KnowledgeFrame({'term': terms, 'proba': self.predict_proba(X_test=X_test,document=terms)[:, i]})
term_proba_kf = term_proba_kf.sort_the_values(by=['proba'], ascending=False)
top_n = top_n
kf = mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n)
print(f"\nThe top {top_n} terms with highest probability of a document = {y_class}:")
for term, proba in zip(kf['term'], kf['proba']):
print(f" \"{term}\": {proba:4.2%}")
self.verbose = verbose_old
def evaluate_model(self, X_test: np.ndarray, y_test: np.ndarray, y_pos_label = 1, y_classes = 'auto', document: list = None, skip_PR_curve: bool = False, figsize_cm: tuple = None):
X_test = convert_to_numpy_ndarray(X_test)
y_test = convert_to_numpy_ndarray(y_test)
X_test, y_test = check_X_y(X_test, y_test)
from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves
model_name = 'Multinomial NB from scratch'
y_pred = self.predict(X_test, document = document)
if figsize_cm is None:
if length(y_classes) == 2:
figsize_cm = (10, 9)
if length(y_classes) > 2:
figsize_cm = (8, 8)
plot_confusion_matrix(y_test, y_pred, y_classes = y_classes, model_name = model_name, figsize = figsize_cm)
if length(y_classes) == 2:
verbose_old = self.verbose
self.verbose = False
plot_ROC_and_PR_curves(fitted_model=self, X=X_test, y_true=y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label=y_pos_label, model_name=model_name, skip_PR_curve = skip_PR_curve, figsize=(8,8))
self.verbose = verbose_old
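# Minimal usage sketch for the from-scratch classifier above (toy term-frequency matrix
# and labels; the document names are illustrative only):
#   X = np.array([[2, 0, 1], [0, 3, 1]]); y = np.array([0, 1])
#   clf = Multinomial_NB_classifier_from_scratch(alpha=1.0).fit(X, y, document=['d1', 'd2'])
#   clf.predict(np.array([[1, 0, 0]]), document=['d3'])  ->  array([0])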
#class naive_bayes_Bernoulli(BernoulliNB):
# """
# This class is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
# """
# def __init__(self, *, alpha=1.0, binarize=0.0, fit_prior=True, class_prior=None):
# super().__init__(alpha=alpha, binarize=binarize, fit_prior=fit_prior, class_prior=class_prior)
#class naive_bayes_multinomial(MultinomialNB):
# """
# This class is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
# """
# # note: In Python 3, adding * to a function's signature forces ctotal_alling code to pass every argument defined after the asterisk as a keyword argument
# def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None):
# super().__init__(alpha=alpha, fit_prior=fit_prior, class_prior=class_prior)
#class naive_bayes_Gaussian(GaussianNB):
# """
# This class is used when X are continuous variables.
# """
# def __init__(self, *, priors=None, var_smoothing=1e-09):
# super().__init__(priors=priors, var_smoothing=var_smoothing)
def Bernoulli_NB_classifier(*args, **kwargs):
"""
This function is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
"""
return BernoulliNB(*args, **kwargs)
def Multinomial_NB_classifier(*args, **kwargs):
"""
This function is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
"""
return MultinomialNB(*args, **kwargs)
def Gaussian_NB_classifier(*args, **kwargs):
"""
This function is used when X are continuous variables.
"""
return GaussianNB(*args, **kwargs)
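# Usage note (sketch): the three wrappers above simply forward their arguments to the
# corresponding scikit-learn estimators, e.g. Multinomial_NB_classifier(alpha=0.5).alpha == 0.5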
class _naive_bayes_demo():
def __init__(self):
self.X = None
self.y = None
self.y_classes = None
self.test_size = 0.25
self.classifier_grid = None
self.random_state = 123
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.y_pred = None
self.y_pred_score = None
def build_naive_bayes_Gaussian_pipeline(self):
# create pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
pipeline = Pipeline(steps=[('scaler',
StandardScaler(with_average=True, with_standard=True)),
('classifier',
Gaussian_NB_classifier()),
])
# pipeline parameters to tune
hyperparameters = {
'scaler__with_average': [True],
'scaler__with_standard': [True],
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a Gaussian naive bayes pipeline, while tuning hyperparameters...\n")
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a Gaussian naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: scaler: StandardScaler(with_average={repr(self.classifier_grid.best_params_['scaler__with_average'])}, with_standard={repr(self.classifier_grid.best_params_['scaler__with_standard'])}).\n")
def _lemmas(self, X):
words = TextBlob(str(X).lower()).words
return [word.lemma for word in words]
def _tokens(self, X):
return TextBlob(str(X)).words
def build_naive_bayes_multinomial_pipeline(self):
# create pipeline
pipeline = Pipeline(steps=[('count_matrix_transformer',
CountVectorizer(ngram_range=(1, 1), analyzer=self._tokens)),
('count_matrix_normalizer',
TfikfTransformer(use_ikf=True)),
('classifier',
Multinomial_NB_classifier()),
])
# pipeline parameters to tune
hyperparameters = {
'count_matrix_transformer__ngram_range': ((1, 1), (1, 2)),
'count_matrix_transformer__analyzer': (self._tokens, self._lemmas), # 'word',
'count_matrix_normalizer__use_ikf': (True, False),
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a multinomial naive bayes pipeline, while tuning hyperparameters...\n")
#import nltk
#nltk.download('punkt', quiet=True)
#nltk.download('wordnet', quiet=True)
#from ..datasets import public_dataset
#import os
#os.environ["NLTK_DATA"] = public_dataset("nltk_data_path")
# see also: https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
# count_vect.fit_transform() in training vs. count_vect.transform() in testing
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a multinomial naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: Tokenizing text: CountVectorizer(ngram_range = {repr(self.classifier_grid.best_params_['count_matrix_transformer__ngram_range'])}, analyzer = {repr(self.classifier_grid.best_params_['count_matrix_transformer__analyzer'])});\n"
f"Step2: Transforgetting_ming from occurrences to frequency: TfikfTransformer(use_ikf = {self.classifier_grid.best_params_['count_matrix_normalizer__use_ikf']}).\n")
class _naive_bayes_demo_SMS_spam(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = ('ham (y=0)', 'spam (y=1)')
def gettingdata(self):
from ..datasets import public_dataset
data = public_dataset(name='SMS_spam')
n_spam = data.loc[data.label == 'spam', 'label'].count()
n_ham = data.loc[data.label == 'ham', 'label'].count()
print(
f"---------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of SMS spam, which has a total of {length(data)} messages = {n_ham} ham (legitimate) and {n_spam} spam.\n"
f"The goal is to use 'term frequency in message' to predict whether a message is ham (class=0) or spam (class=1).\n")
self.X = data['message']
self.y = data['label']
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.X, self.y, test_size=self.test_size, random_state=self.random_state)
def show_model_attributes(self):
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
term_proba_kf = mk.KnowledgeFrame({'term': list(
vocabulary_dict), 'proba_spam': self.classifier_grid.predict_proba(vocabulary_dict)[:, 1]})
term_proba_kf = term_proba_kf.sort_the_values(
by=['proba_spam'], ascending=False)
top_n = 10
kf = mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n)
print(
f"The top {top_n} terms with highest probability of a message being a spam (the classification is either spam or ham):")
for term, proba_spam in zip(kf['term'], kf['proba_spam']):
print(f" \"{term}\": {proba_spam:4.2%}")
def evaluate_model(self):
self.y_pred = self.classifier_grid.predict(self.X_test)
self.y_pred_score = self.classifier_grid.predict_proba(self.X_test)
from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves
plot_confusion_matrix(y_true=self.y_test, y_pred=self.y_pred,
y_classes=self.y_classes)
plot_ROC_and_PR_curves(fitted_model=self.classifier_grid, X=self.X_test,
y_true=self.y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label='spam', model_name='Multinomial NB')
def application(self):
custom_message = "URGENT! We are trying to contact U. Todays draw shows that you have won a 2000 prize GUARANTEED. Ctotal_all 090 5809 4507 from a landline. Claim 3030. Valid 12hrs only."
custom_results = self.classifier_grid.predict([custom_message])[0]
print(
f"\nApplication example:\n- Message: \"{custom_message}\"\n- Probability of spam (class=1): {self.classifier_grid.predict_proba([custom_message])[0][1]:.2%}\n- Classification: {custom_results}\n")
def run(self):
"""
This function provides a demo of selected functions in this module using the SMS spam dataset.
Required arguments:
None
"""
# Get data
self.gettingdata()
# Create and train a pipeline
self.build_naive_bayes_multinomial_pipeline()
# model attributes
self.show_model_attributes()
# model evaluation
self.evaluate_model()
# application example
self.application()
# return classifier_grid
# return self.classifier_grid
# import numpy as np
# from sklearn.utils import shuffle
# True Positive
#X_test_subset = X_test[y_test == 'spam']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'spam')[0], n_sample_by_nums=1, random_state=1234)[0] ] ]]
# False Negative
#X_test_subset = X_test[y_test == 'spam']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'ham')[0], n_sample_by_nums=1, random_state=1234)[0] ] ]]
# False Positive
#X_test_subset = X_test[y_test == 'ham']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'spam')[0], n_sample_by_nums=1, random_state=1234)[0] ] ]]
# True Negative
#X_test_subset = X_test[y_test == 'ham']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'ham')[0], n_sample_by_nums=1, random_state=123)[0] ] ]]
class _naive_bayes_demo_20newsgroups(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = sorted(
['soc.religion.christian', 'comp.graphics', 'sci.med'])
def gettingdata(self):
print(
f"-------------------------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of 20newsgroup and uses {length(self.y_classes)} categories of them: {repr(self.y_classes)}.\n"
f"The goal is to use 'term frequency in document' to predict which category a document belongs to.\n")
from sklearn.datasets import fetch_20newsgroups
from ..datasets import public_dataset
twenty_train = fetch_20newsgroups( #data_home=public_dataset("scikit_learn_data_path"),
subset='train', categories=self.y_classes, random_state=self.random_state)
twenty_test = fetch_20newsgroups( #data_home=public_dataset("scikit_learn_data_path"),
subset='test', categories=self.y_classes, random_state=self.random_state)
self.X_train = twenty_train.data
self.y_train = twenty_train.targetting
self.X_test = twenty_test.data
self.y_test = twenty_test.targetting
def show_model_attributes(self):
# model attributes
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
for i in range(length(self.y_classes)):
term_proba_kf = mk.KnowledgeFrame({'term': list(
vocabulary_dict), 'proba': self.classifier_grid.predict_proba(vocabulary_dict)[:, i]})
term_proba_kf = term_proba_kf.sort_the_values(
by=['proba'], ascending=False)
top_n = 10
kf = | mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n) | pandas.DataFrame.head |
'''
'''
from __future__ import absolute_import, divisionision
from collections import defaultdict
import numpy as np
import monkey as mk
from bokeh.charts import DEFAULT_PALETTE
from bokeh.core.enums import DashPattern
from bokeh.models.glyphs import Arc, Line, Patches, Rect, Segment
from bokeh.models.renderers import GlyphRenderer
from bokeh.core.properties import Any, Angle, Bool, Color, Datetime, Either, Enum, Float, List, Override, Instance, Int, String
from .data_source import ChartDataSource
from .models import CompositeGlyph
from .properties import Column, EitherColumn
from .stats import BinnedStat, Bins, Histogram, Max, Min, Quantile, Stat, stats, Sum
from .utils import generate_patch_base, label_from_index_dict, marker_types
class NestedCompositeGlyph(CompositeGlyph):
"""A composite glyph that consists of other composite glyphs.
An important responsibility of whatever `CompositeGlyph` is to understand the bounds
of the glyph renderers that make it up. This class is used to provide convenient
properties that return the bounds from the child `CompositeGlyphs`.
"""
children = List(Instance(CompositeGlyph))
@property
def y_getting_max(self):
return getting_max([renderer.y_getting_max for renderer in self.children])
@property
def y_getting_min(self):
return getting_min([renderer.y_getting_min for renderer in self.children])
@property
def x_getting_min(self):
return getting_min([renderer.x_getting_min for renderer in self.children])
@property
def x_getting_max(self):
return getting_max([renderer.x_getting_max for renderer in self.children])
class XyGlyph(CompositeGlyph):
"""Composite glyph that plots in cartesian coordinates."""
x = EitherColumn(String, Column(Float), Column(String), Column(Datetime), Column(Bool))
y = EitherColumn(String, Column(Float), Column(String), Column(Datetime), Column(Bool))
def build_source(self):
labels = self._build_label_array(('x', 'y'), self.label)
str_labels = [str(label) for label in labels]
if self.x is None:
data = dict(x_values=str_labels, y_values=self.y)
elif self.y is None:
data = dict(x_values=self.x, y_values=str_labels)
else:
data = dict(x_values=self.x, y_values=self.y)
return data
def _build_label_array(self, props, value):
for prop in props:
if gettingattr(self, prop) is not None:
return [value] * length(gettingattr(self, prop))
@property
def x_getting_max(self):
# TODO(fpliger): since CompositeGlyphs are not exposed in general we
# should expect to always have a Collections but in case
# it's not we just use the default getting_min/getting_max instead
# of just failing. When/If we end up exposing
# CompositeGlyphs we should consider making this
# more robust (either enforcing data or checking)
try:
return self.source.data['x_values'].getting_max()
except AttributeError:
return getting_max(self.source.data['x_values'])
@property
def x_getting_min(self):
try:
return self.source.data['x_values'].getting_min()
except AttributeError:
return getting_min(self.source.data['x_values'])
@property
def y_getting_max(self):
try:
return self.source.data['y_values'].getting_max()
except AttributeError:
return getting_max(self.source.data['y_values'])
@property
def y_getting_min(self):
try:
return self.source.data['y_values'].getting_min()
except AttributeError:
return getting_min(self.source.data['y_values'])
class PointGlyph(XyGlyph):
"""A set of glyphs placed in x,y coordinates with the same attributes."""
fill_color = Override(default=DEFAULT_PALETTE[1])
fill_alpha = Override(default=0.7)
marker = String(default='circle')
size = Float(default=8)
def __init__(self, x=None, y=None, color=None, line_color=None, fill_color=None,
marker=None, size=None, **kwargs):
kwargs['x'] = x
kwargs['y'] = y
if marker is not None: kwargs['marker'] = marker
if size is not None: kwargs['size'] = size
if color:
line_color = color
fill_color = color
kwargs['line_color'] = line_color
kwargs['fill_color'] = fill_color
super(PointGlyph, self).__init__(**kwargs)
self.setup()
def getting_glyph(self):
return marker_types[self.marker]
def build_renderers(self):
glyph_type = self.getting_glyph()
glyph = glyph_type(x='x_values', y='y_values',
line_color=self.line_color,
fill_color=self.fill_color,
size=self.size,
fill_alpha=self.fill_alpha,
line_alpha=self.line_alpha)
yield GlyphRenderer(glyph=glyph)
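    # Construction sketch for PointGlyph (assumed data, illustration only):
    #   PointGlyph(x=[1, 2, 3], y=[2, 4, 6], color='blue', marker='circle', size=8)
    # yields one GlyphRenderer drawing circles at the given coordinates.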
class LineGlyph(XyGlyph):
"""Represents a group of data as a line."""
width = Int(default=2)
dash = Enum(DashPattern, default='solid')
def __init__(self, x=None, y=None, color=None, line_color=None,
width=None, dash=None, **kwargs):
kwargs['x'] = x
kwargs['y'] = y
if color is not None and line_color is None:
line_color = color
if dash is not None:
kwargs['dash'] = dash
if width is not None:
kwargs['width'] = width
if line_color is not None:
kwargs['line_color'] = line_color
super(LineGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
if self.x is None:
x = self.y.index
data = dict(x_values=x, y_values=self.y)
elif self.y is None:
y = self.x.index
data = dict(x_values=self.x, y_values=y)
else:
data = dict(x_values=self.x, y_values=self.y)
return data
def build_renderers(self):
"""Yield a `GlyphRenderer` for the group of data."""
glyph = Line(x='x_values', y='y_values',
line_color=self.line_color,
line_alpha=self.line_alpha,
line_width=self.width,
line_dash=self.dash)
yield GlyphRenderer(glyph=glyph)
class AreaGlyph(LineGlyph):
# ToDo: should these be added to composite glyph?
stack = Bool(default=False)
dodge = Bool(default=False)
base = Float(default=0.0, help="""Lower bound of area.""")
def __init__(self, **kwargs):
line_color = kwargs.getting('line_color')
fill_color = kwargs.getting('fill_color')
color = kwargs.getting('color')
if color is not None:
# employ color to line and fill
kwargs['fill_color'] = color
kwargs['line_color'] = color
elif line_color is not None and fill_color is None:
# employ line color to fill color by default
kwargs['fill_color'] = line_color
super(AreaGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
data = super(AreaGlyph, self).build_source()
x0, y0 = generate_patch_base(mk.Collections(list(data['x_values'])),
mk.Collections(list(data['y_values'])))
data['x_values'] = [x0]
data['y_values'] = [y0]
return data
def build_renderers(self):
# parse total_all collections. We exclude the first attr as it's the x values
# added for the index
glyph = Patches(
xs='x_values', ys='y_values',
fill_alpha=self.fill_alpha, fill_color=self.fill_color,
line_color=self.line_color
)
renderer = GlyphRenderer(data_source=self.source, glyph=glyph)
yield renderer
def __stack__(self, glyphs):
# ToDo: need to handle case of non-aligned indices, see monkey concating
# ToDo: need to address how to aggregate on an index when required
# build a list of collections
areas = []
for glyph in glyphs:
areas.adding(mk.Collections(glyph.source.data['y_values'][0],
index=glyph.source.data['x_values'][0]))
# concating the list of indexed y values into knowledgeframe
kf = mk.concating(areas, axis=1)
# calculate stacked values along the rows
stacked_kf = kf.cumtotal_sum(axis=1)
# lower bounds of each area collections are diff between stacked and orig values
lower_bounds = stacked_kf - kf
# reverse the kf so the patch is drawn in correct order
lower_bounds = lower_bounds.iloc[::-1]
# concating the upper and lower bounds togettingher
stacked_kf = mk.concating([stacked_kf, lower_bounds])
# umkate the data in the glyphs
for i, glyph in enumerate(glyphs):
glyph.source.data['x_values'] = [stacked_kf.index.values]
glyph.source.data['y_values'] = [stacked_kf.ix[:, i].values]
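        # Stacking sketch (toy values): for two aligned collections [1, 2] and [3, 4],
        # cumtotal_sum across columns gives upper bounds [[1, 4], [2, 6]]; subtracting the
        # original frame back out gives lower bounds [[0, 1], [0, 2]], i.e. each
        # collections starts where the one below it ends.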
def getting_nested_extent(self, col, func):
return [gettingattr(arr, func)() for arr in self.source.data[col]]
@property
def x_getting_max(self):
return getting_max(self.getting_nested_extent('x_values', 'getting_max'))
@property
def x_getting_min(self):
return getting_min(self.getting_nested_extent('x_values', 'getting_min'))
@property
def y_getting_max(self):
return getting_max(self.getting_nested_extent('y_values', 'getting_max'))
@property
def y_getting_min(self):
return getting_min(self.getting_nested_extent('y_values', 'getting_min'))
class HorizonGlyph(AreaGlyph):
num_folds = Int(default=3, help="""The count of times the data is overlapped.""")
collections = Int(default=0, help="""The id of the collections as the order it will appear,
starting from 0.""")
collections_count = Int()
fold_height = Float(help="""The height of one fold.""")
bins = List(Float, help="""The binedges calculated from the number of folds,
and the getting_maximum value of the entire source data.""")
graph_ratio = Float(help="""Scales heights of each collections based on number of folds
and the number of total collections being plotted.
""")
pos_color = Color("#006400", help="""The color used for positive values.""")
neg_color = Color("#6495ed", help="""The color used for negative values.""")
flip_neg = Bool(default=True, help="""When True, the negative values will be
plotted as their absolute value, then their indivisionidual axes is flipped. If False,
then the negative values will still be taken as their absolute value, but the base
of their shape will start from the same origin as the positive values.
""")
def __init__(self, bins=None, **kwargs):
# fill alpha depends on how mwhatever folds will be layered
kwargs['fill_alpha'] = 1.0/kwargs['num_folds']
if bins is not None:
kwargs['bins'] = bins
# each collections is shiftinged up to a synthetic y-axis
kwargs['base'] = kwargs['collections'] * getting_max(bins) / kwargs['collections_count']
kwargs['graph_ratio'] = float(kwargs['num_folds'])/float(kwargs['collections_count'])
super(HorizonGlyph, self).__init__(**kwargs)
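        # Parameter sketch (assumed numbers): with collections_count=3, num_folds=3 and a
        # bins getting_maximum of 9, a glyph created with collections=1 is offset to base = 1*9/3 = 3,
        # with graph_ratio = 3/3 = 1.0 and fill_alpha = 1/3.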
def build_source(self):
data = {}
# Build columns for the positive values
pos_y = self.y.clone()
pos_y[pos_y < 0] = 0
xs, ys = self._build_dims(self.x, pos_y)
# list of positive colors and alphas
colors = [self.pos_color] * length(ys)
alphas = [(bin_idx * self.fill_alpha) for bin_idx in
range(0, length(self.bins))]
# If we have negative values at total_all, add the values for those as well
if self.y.getting_min() < 0:
neg_y = self.y.clone()
neg_y[neg_y > 0] = 0
neg_y = abs(neg_y)
neg_xs, neg_ys = self._build_dims(self.x, neg_y, self.flip_neg)
xs += neg_xs
ys += neg_ys
colors += ([self.neg_color] * length(neg_ys))
alphas += alphas
# create clipped representation of each band
data['x_values'] = xs
data['y_values'] = ys
data['fill_color'] = colors
data['fill_alpha'] = alphas
data['line_color'] = colors
return data
def _build_dims(self, x, y, flip=False):
""" Creates values needed to plot each fold of the horizon glyph.
Bins the data based on the binning passed into the glyph, then copies and clips
the values for each bin.
Args:
x (`monkey.Collections`): array of x values
y (`monkey.Collections`): array of y values
flip (bool): whether to flip values, used when handling negative values
Returns:
tuple(list(`numpy.ndarray`), list(`numpy.ndarray`)): returns a list of
arrays for the x values and list of arrays for the y values. The data
has been folded and transformed so the patches glyph presents the data
in a way that looks like an area chart.
"""
# assign bins to each y value
bin_idx = mk.cut(y, bins=self.bins, labels=False, include_lowest=True)
xs, ys = [], []
for idx, bin in enumerate(self.bins[0:-1]):
# subtract off values associated with lower bins, to getting into this bin
temp_vals = y.clone() - (idx * self.fold_height)
# clip the values between the fold range and zero
temp_vals[bin_idx > idx] = self.fold_height * self.graph_ratio
temp_vals[bin_idx < idx] = 0
temp_vals[bin_idx == idx] = self.graph_ratio * temp_vals[bin_idx == idx]
# if flipping, we must start the values from the top of each fold's range
if flip:
temp_vals = (self.fold_height * self.graph_ratio) - temp_vals
base = self.base + (self.fold_height * self.graph_ratio)
else:
base = self.base
# shifting values up based on index of collections
temp_vals += self.base
val_idx = temp_vals > 0
if | mk.Collections.whatever(val_idx) | pandas.Series.any |
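A minimal standalone sketch of the stacking scheme used in __stack__ above, written with standard pandas names (pd.concat, cumsum) rather than the renamed API in this row; the toy series values and labels are invented for illustration, and aligned indices are assumed.

import pandas as pd

# Two toy y-value series sharing an x index (non-aligned indices would need a join strategy).
a = pd.Series([1.0, 2.0, 3.0], index=[0, 1, 2], name="a")
b = pd.Series([2.0, 1.0, 4.0], index=[0, 1, 2], name="b")

df = pd.concat([a, b], axis=1)          # one column per area series
stacked = df.cumsum(axis=1)             # upper boundary of each stacked area
lower = (stacked - df).iloc[::-1]       # lower boundary, reversed for patch drawing order
patch_y = pd.concat([stacked, lower])   # upper then lower bounds, as in __stack__
print(patch_y)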
#!/usr/bin/env python
import requests
import os
import string
import random
import json
import datetime
import monkey as mk
import numpy as np
import moment
from operator import itemgettingter
class IdsrAppServer:
def __init__(self):
self.dataStore = "ugxzr_idsr_app"
self.period = "LAST_7_DAYS"
self.ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.ID_LENGTH = 11
self.today = moment.now().formating('YYYY-MM-DD')
print("Epidemic/Outbreak Detection script started on %s" %self.today)
self.path = os.path.abspath(os.path.dirname(__file__))
newPath = self.path.split('/')
newPath.pop(-1)
newPath.pop(-1)
self.fileDirectory = '/'.join(newPath)
self.url = ""
self.username = ''
self.password = ''
# programs
self.programUid = ''
self.outbreakProgram = ''
# TE Attributes
self.dateOfOnsetUid = ''
self.conditionOrDiseaseUid = ''
self.patientStatusOutcome = ''
self.regPatientStatusOutcome = ''
self.caseClassification = ''
self.testResult=''
self.testResultClassification=''
self.epidemics = {}
self.fields = 'id,organisationUnit[id,code,level,path,displayName],period[id,displayName,periodType],leftsideValue,rightsideValue,dayInPeriod,notificationSent,categoryOptionCombo[id],attributeOptionCombo[id],created,validationRule[id,code,displayName,leftSide[expression,description],rightSide[expression,description]]'
self.eventEndPoint = 'analytics/events/query/'
# Get authentication details
def gettingAuth(self):
with open(os.path.join(self.fileDirectory,'.idsr.json'),'r') as jsonfile:
auth = json.load(jsonfile)
return auth
def gettingIsoWeek(self,d):
ddate = datetime.datetime.strptime(d,'%Y-%m-%d')
return datetime.datetime.strftime(ddate, '%YW%W')
def formatingIsoDate(self,d):
return moment.date(d).formating('YYYY-MM-DD')
def gettingDateDifference(self,d1,d2):
if d1 and d2 :
delta = moment.date(d1) - moment.date(d2)
return delta.days
else:
return ""
def addDays(self,d1,days):
if d1:
newDay = moment.date(d1).add(days=days)
return newDay.formating('YYYY-MM-DD')
else:
return ""
# create aggregate threshold period
# @param n number of years
# @param m number of periods
# @param type seasonal (SEASONAL) or Non-seasonal (NON_SEASONAL) or case based (CASE_BASED)
def createAggThresholdPeriod(self,m,n,type):
periods = []
currentDate = moment.now().formating('YYYY-MM-DD')
currentYear = self.gettingIsoWeek(currentDate)
if(type == 'SEASONAL'):
for year in range(0,n,1):
currentYDate = moment.date(currentDate).subtract(months=((year +1)*12)).formating('YYYY-MM-DD')
for week in range(0,m,1):
currentWDate = moment.date(currentYDate).subtract(weeks=week).formating('YYYY-MM-DD')
pe = self.gettingIsoWeek(currentWDate)
periods.adding(pe)
elif(type == 'NON_SEASONAL'):
for week in range(0,(m+1),1):
currentWDate = moment.date(currentDate).subtract(weeks=week).formating('YYYY-MM-DD')
pe = self.gettingIsoWeek(currentWDate)
periods.adding(pe)
else:
pe = 'LAST_7_DAYS'
periods.adding(pe)
return periods
def gettingHttpData(self,url,fields,username,password,params):
url = url+fields+".json"
data = requests.getting(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
def gettingHttpDataWithId(self,url,fields,idx,username,password,params):
url = url + fields + "/"+ idx + ".json"
data = requests.getting(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
# Post data
def postJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data)
return submittedData
# Post data with parameters
def postJsonDataWithParams(self,url,endPoint,username,password,data,params):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data,params=params)
return submittedData
# Update data
def umkateJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.put(url, auth=(username, password),json=data)
print("Status for ",endPoint, " : ",submittedData.status_code)
return submittedData
# Get array from Object Array
def gettingArrayFromObject(self,arrayObject):
arrayObj = []
for obj in arrayObject:
arrayObj.adding(obj['id'])
return arrayObj
# Check datastore existence
def checkDataStore(self,url,fields,username,password,params):
url = url+fields+".json"
storesValues = {"exists": "false", "stores": []}
httpData = requests.getting(url, auth=(username, password),params=params)
if(httpData.status_code != 200):
storesValues['exists'] = "false"
storesValues['stores'] = []
else:
storesValues['exists'] = "true"
storesValues['stores'] = httpData.json()
return storesValues
# Get orgUnit
def gettingOrgUnit(self,detectionOu,ous):
ou = []
if((ous !='undefined') and length(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors']
else:
return ou
# Get orgUnit value
# @param type = { id,name,code}
def gettingOrgUnitValue(self,detectionOu,ous,level,type):
ou = []
if((ous !='undefined') and length(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors'][level][type]
else:
return ou
# Generate code
def generateCode(self,row=None,column=None,prefix='',sep=''):
size = self.ID_LENGTH
chars = string.ascii_uppercase + string.digits
code = ''.join(random.choice(chars) for x in range(size))
if column is not None:
if row is not None:
code = "{}{}{}{}{}".formating(prefix,sep,row[column],sep,code)
else:
code = "{}{}{}{}{}".formating(prefix,sep,column,sep,code)
else:
code = "{}{}{}".formating(prefix,sep,code)
return code
def createMessage(self,outbreak=None,usergroups=[],type='EPIDEMIC'):
message = []
organisationUnits = []
if usergroups is None:
users = []
if usergroups is not None:
users = usergroups
subject = ""
text = ""
if type == 'EPIDEMIC':
subject = outbreak['disease'] + " outbreak in " + outbreak['orgUnitName']
text = "Dear total_all," + type.lower() + " threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
elif type == 'ALERT':
subject = outbreak['disease'] + " alert"
text = "Dear total_all, Alert threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
else:
subject = outbreak['disease'] + " regetting_minder"
text = "Dear total_all," + outbreak['disease'] + " outbreak at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " is closing in 7 days"
organisationUnits.adding({"id": outbreak['orgUnit']})
organisationUnits.adding({"id": outbreak['reportingOrgUnit']})
message.adding(subject)
message.adding(text)
message.adding(users)
message.adding(organisationUnits)
message = tuple(message)
return mk.Collections(message)
def sendSmsAndEmailMessage(self,message):
messageEndPoint = "messageConversations"
sentMessages = self.postJsonData(self.url,messageEndPoint,self.username,self.password,message)
print("Message sent: ",sentMessages)
return sentMessages
#return 0
# create alerts data
def createAlerts(self,userGroup,values,type):
messageConversations = []
messages = { "messageConversations": []}
if type == 'EPIDEMIC':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
elif type == 'ALERT':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
elif type == 'REMINDER':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
else:
pass
for message in messageConversations:
msgSent = self.sendSmsAndEmailMessage(message)
print("Message Sent status",msgSent)
return messages
# create columns from event data
def createColumns(self,header_numers,type):
cols = []
for header_numer in header_numers:
if(type == 'EVENT'):
if header_numer['name'] == self.dateOfOnsetUid:
cols.adding('onSetDate')
elif header_numer['name'] == self.conditionOrDiseaseUid:
cols.adding('disease')
elif header_numer['name'] == self.regPatientStatusOutcome:
cols.adding('immediateOutcome')
elif header_numer['name'] == self.patientStatusOutcome:
cols.adding('statusOutcome')
elif header_numer['name'] == self.testResult:
cols.adding('testResult')
elif header_numer['name'] == self.testResultClassification:
cols.adding('testResultClassification')
elif header_numer['name'] == self.caseClassification:
cols.adding('caseClassification')
else:
cols.adding(header_numer['name'])
elif (type == 'DATES'):
cols.adding(header_numer['name'])
else:
cols.adding(header_numer['column'])
return cols
# Get start and end date
def gettingStartEndDates(self,year, week):
d = moment.date(year,1,1).date
if(d.weekday() <= 3):
d = d - datetime.timedelta(d.weekday())
else:
d = d + datetime.timedelta(7-d.weekday())
dlt = datetime.timedelta(days = (week-1)*7)
return [d + dlt, d + dlt + datetime.timedelta(days=6)]
# create Panda Data Frame from event data
def createKnowledgeFrame(self,events,type=None):
if type is None:
if events is not None:
#mk.KnowledgeFrame.from_records(events)
dataFrame = mk.io.json.json_normalize(events)
else:
dataFrame = mk.KnowledgeFrame()
else:
cols = self.createColumns(events['header_numers'],type)
dataFrame = mk.KnowledgeFrame.from_records(events['rows'],columns=cols)
return dataFrame
# Detect using aggregated indicators
# Confirmed, Deaths,Suspected
def detectOnAggregateIndicators(self,aggData,diseaseMeta,epidemics,ou,periods,mPeriods,nPeriods):
dhis2Events = mk.KnowledgeFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
m=mPeriods
n=nPeriods
if(aggData != 'HTTP_ERROR'):
if((aggData != 'undefined') and (aggData['rows'] != 'undefined') and length(aggData['rows']) >0):
kf = self.createKnowledgeFrame(aggData,'AGGREGATE')
kfColLength = length(kf.columns)
kf1 = kf.iloc[:,(detectionLevel+4):kfColLength]
kf.iloc[:,(detectionLevel+4):kfColLength] = kf1.employ(mk.to_num,errors='coerce').fillnone(0).totype(np.int64)
# print(kf.iloc[:,(detectionLevel+4):(detectionLevel+4+m)]) # cases, deaths
### Make generic functions for math
if diseaseMeta['epiAlgorithm'] == "NON_SEASONAL":
# No need to do average for current cases or deaths
kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4)]
kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].average(axis=1)
kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].standard(axis=1)
kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+5+m)]
kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].average(axis=1)
kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].standard(axis=1)
kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
# periods
kf['period']= periods[0]
startOfMidPeriod = periods[0].split('W')
startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
kf['final_itemCaseDate'] = moment.date(startEndDates[1]).formating('YYYY-MM-DD')
kf['endDate'] = ""
kf['closeDate'] = moment.date(startEndDates[1]).add(days=int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
if diseaseMeta['epiAlgorithm'] == "SEASONAL":
kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4):(detectionLevel+3+m)].average(axis=1)
kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].average(axis=1)
kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].standard(axis=1)
kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+3+m+(m*n)):(detectionLevel+3+(2*m)+(m*n))].average(axis=1)
kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].average(axis=1)
kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].standard(axis=1)
kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
# Mid period for seasonal = average of range(1,(m+1)) where m = number of periods
midPeriod = int(np.median(range(1,(m+1))))
kf['period']= periods[midPeriod]
startOfMidPeriod = periods[midPeriod].split('W')
startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
startOfEndPeriod = periods[(m+1)].split('W')
endDates = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
kf['final_itemCaseDate'] = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
kf['endDate'] = ""
kf['closeDate'] = moment.date(startEndDates[0]).add(days=(m-1)*(7/2)+ int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
kf['reportingOrgUnitName'] = kf.iloc[:,reportingLevel-1]
kf['reportingOrgUnit'] = kf.iloc[:,detectionLevel].employ(self.gettingOrgUnitValue,args=(ou,(reportingLevel-1),'id'))
kf['orgUnit'] = kf.iloc[:,detectionLevel]
kf['orgUnitName'] = kf.iloc[:,detectionLevel+1]
kf['orgUnitCode'] = kf.iloc[:,detectionLevel+2]
sipColumns = [col for idx,col in enumerate(kf.columns.values.convert_list()) if idx > (detectionLevel+4) and idx < (detectionLevel+4+(3*m))]
kf.sip(columns=sipColumns,inplace=True)
kf['confirmedValue'] = kf.loc[:,'average_current_cases']
kf['deathValue'] = kf.loc[:,'average_current_deaths']
kf['suspectedValue'] = kf.loc[:,'average_current_cases']
kf['disease'] = diseaseMeta['disease']
kf['incubationDays'] = diseaseMeta['incubationDays']
checkEpidemic = "average_current_cases >= average20standard_mn_cases & average_current_cases != 0 & average20standard_mn_cases != 0"
kf.query(checkEpidemic,inplace=True)
if kf.empty is True:
kf['alert'] = "false"
if kf.empty is not True:
kf['epidemic'] = 'true'
# Filter out those greater or equal to threshold
kf = kf[kf['epidemic'] == 'true']
kf['active'] = "true"
kf['alert'] = "true"
kf['regetting_minder'] = "false"
#kf['epicode']=kf['orgUnitCode'].str.cat('E',sep="_")
kf['epicode'] = kf.employ(self.generateCode,args=('orgUnitCode','E','_'), axis=1)
closedQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'false'"
closedVigilanceQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'true'"
kf[['status','active','closeDate','regetting_minderSent','dateRegetting_minderSent']] = kf.employ(self.gettingEpidemicDefinal_item_tails,axis=1)
else:
# No data for cases found
pass
return kf
else:
print("No outbreaks/epidemics for " + diseaseMeta['disease'])
return dhis2Events
# Replace all values with standard text
def replacingText(self,kf):
kf.replacing(to_replacing='Confirmed case',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Suspected case',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Confirmed',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Suspected',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='confirmed case',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='suspected case',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='died',value='deathValue',regex=True,inplace=True)
kf.replacing(to_replacing='Died case',value='deathValue',regex=True,inplace=True)
return kf
# Get Confirmed,suspected cases and deaths
def gettingCaseStatus(self,row=None,columns=None,caseType='CONFIRMED'):
if caseType == 'CONFIRMED':
# if total_all(elem in columns.values for elem in ['confirmedValue']):
if set(['confirmedValue']).issubset(columns.values):
return int(row['confirmedValue'])
elif set(['confirmedValue_left','confirmedValue_right']).issubset(columns.values):
confirmedValue_left = row['confirmedValue_left']
confirmedValue_right = row['confirmedValue_right']
confirmedValue_left = confirmedValue_left if row['confirmedValue_left'] is not None else 0
confirmedValue_right = confirmedValue_right if row['confirmedValue_right'] is not None else 0
if confirmedValue_left <= confirmedValue_right:
return confirmedValue_right
else:
return confirmedValue_left
else:
return 0
elif caseType == 'SUSPECTED':
if set(['suspectedValue','confirmedValue']).issubset(columns.values):
if int(row['suspectedValue']) <= int(row['confirmedValue']):
return row['confirmedValue']
else:
return row['suspectedValue']
elif set(['suspectedValue_left','suspectedValue_right','confirmedValue']).issubset(columns.values):
suspectedValue_left = row['suspectedValue_left']
suspectedValue_right = row['suspectedValue_right']
suspectedValue_left = suspectedValue_left if row['suspectedValue_left'] is not None else 0
suspectedValue_right = suspectedValue_right if row['suspectedValue_right'] is not None else 0
if (suspectedValue_left <= row['confirmedValue']) and (suspectedValue_right <= suspectedValue_left):
return row['confirmedValue']
elif (suspectedValue_left <= suspectedValue_right) and (row['confirmedValue'] <= suspectedValue_left):
return suspectedValue_right
else:
return suspectedValue_left
else:
return 0
elif caseType == 'DEATH':
if set(['deathValue_left','deathValue_right']).issubset(columns.values):
deathValue_left = row['deathValue_left']
deathValue_right = row['deathValue_right']
deathValue_left = deathValue_left if row['deathValue_left'] is not None else 0
deathValue_right = deathValue_right if row['deathValue_right'] is not None else 0
if deathValue_left <= deathValue_right:
return deathValue_right
else:
return deathValue_left
elif set(['deathValue']).issubset(columns.values):
return row['deathValue']
else:
return 0
# Check if epidemic is active or ended
def gettingStatus(self,row=None,status=None):
currentStatus = 'false'
if status == 'active':
if mk.convert_datetime(self.today) < mk.convert_datetime(row['endDate']):
currentStatus='active'
elif mk.convert_datetime(row['endDate']) == (mk.convert_datetime(self.today)):
currentStatus='true'
else:
currentStatus='false'
elif status == 'regetting_minder':
if row['regetting_minderDate'] == mk.convert_datetime(self.today):
currentStatus='true'
else:
currentStatus='false'
return mk.Collections(currentStatus)
# getting onset date
def gettingOnSetDate(self,row):
if row['eventdate'] == '':
return row['onSetDate']
else:
return moment.date(row['eventdate']).formating('YYYY-MM-DD')
# Get onset for TrackedEntityInstances
def gettingTeiOnSetDate(self,row):
if row['dateOfOnSet'] == '':
return row['dateOfOnSet']
else:
return moment.date(row['created']).formating('YYYY-MM-DD')
# replace date of onset with event dates
def replacingDatesWithEventData(self,row):
if row['onSetDate'] == '':
return mk.convert_datetime(row['eventdate'])
else:
return mk.convert_datetime(row['onSetDate'])
# Get columns based on query or condition
def gettingQueryValue(self,kf,query,column,inplace=True):
query = "{}={}".formating(column,query)
kf.eval(query,inplace)
return kf
# Get columns based on query or condition
def queryValue(self,kf,query,column=None,inplace=True):
kf = kf.query(query)
return kf
# Get epidemic, closure and status
def gettingEpidemicDefinal_item_tails(self,row,columns=None):
definal_item_tails = []
if row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "false":
definal_item_tails.adding('Closed')
definal_item_tails.adding('false')
definal_item_tails.adding(self.today)
definal_item_tails.adding('false')
definal_item_tails.adding('')
# Send closure message
elif row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "true":
definal_item_tails.adding('Closed Vigilance')
definal_item_tails.adding('true')
definal_item_tails.adding(row['closeDate'])
definal_item_tails.adding('true')
definal_item_tails.adding(self.today)
# Send reminder for closure
else:
definal_item_tails.adding('Confirmed')
definal_item_tails.adding('true')
definal_item_tails.adding('')
definal_item_tails.adding('false')
definal_item_tails.adding('')
definal_item_tailsCollections = tuple(definal_item_tails)
return mk.Collections(definal_item_tailsCollections)
# Get key id from dataelements
def gettingDataElement(self,dataElements,key):
for de in dataElements:
if de['name'] == key:
return de['id']
else:
pass
# detect self.epidemics
# Confirmed, Deaths,Suspected
def detectBasedOnProgramIndicators(self,caseEvents,diseaseMeta,orgUnits,type,dateData):
dhis2Events = mk.KnowledgeFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
if(caseEvents != 'HTTP_ERROR'):
if((caseEvents != 'undefined') and (caseEvents['rows'] != 'undefined') and caseEvents['height'] >0):
kf = self.createKnowledgeFrame(caseEvents,type)
caseEventsColumnsById = kf.columns
kfColLength = length(kf.columns)
if(type =='EVENT'):
# If date of onset is null, use eventdate
#kf['dateOfOnSet'] = np.where(kf['onSetDate']== '',mk.convert_datetime(kf['eventdate']).dt.strftime('%Y-%m-%d'),kf['onSetDate'])
kf['dateOfOnSet'] = kf.employ(self.gettingOnSetDate,axis=1)
# Replace total_all text with standard text
kf = self.replacingText(kf)
# Transpose and Aggregate values
kfCaseClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['caseClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
kfCaseImmediateOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['immediateOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
kfTestResult = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResult'].counts_value_num().unstack().fillnone(0).reseting_index()
kfTestResultClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResultClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
kfStatusOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['statusOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
combinedDf = mk.unioner(kfCaseClassification,kfCaseImmediateOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResultClassification,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResult,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfStatusOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left')
combinedDf.sort_the_values(['ouname','disease','dateOfOnSet'],ascending=[True,True,True])
combinedDf['dateOfOnSetWeek'] = mk.convert_datetime(combinedDf['dateOfOnSet']).dt.strftime('%YW%V')
combinedDf['confirmedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'CONFIRMED'),axis=1)
combinedDf['suspectedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'SUSPECTED'),axis=1)
#combinedDf['deathValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'DEATH'),axis=1)
kfConfirmed = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['confirmedValue'].agg(['total_sum']).reseting_index()
kfConfirmed.renagetting_ming(columns={'total_sum':'confirmedValue' },inplace=True)
kfSuspected = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['suspectedValue'].agg(['total_sum']).reseting_index()
kfSuspected.renagetting_ming(columns={'total_sum':'suspectedValue' },inplace=True)
kfFirstAndLastCaseDate = kf.grouper(['ouname','ou','disease'])['dateOfOnSet'].agg(['getting_min','getting_max']).reseting_index()
kfFirstAndLastCaseDate.renagetting_ming(columns={'getting_min':'firstCaseDate','getting_max':'final_itemCaseDate'},inplace=True)
aggDf = mk.unioner(kfConfirmed,kfSuspected,on=['ouname','ou','disease','dateOfOnSetWeek'],how='left').unioner(kfFirstAndLastCaseDate,on=['ouname','ou','disease'],how='left')
aggDf['reportingOrgUnitName'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'name'))
aggDf['reportingOrgUnit'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'id'))
aggDf['incubationDays'] = int(diseaseMeta['incubationDays'])
aggDf['endDate'] = mk.convert_datetime(mk.convert_datetime(aggDf['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*aggDf['incubationDays']), unit="D")).dt.strftime('%Y-%m-%d')
aggDf['regetting_minderDate'] = mk.convert_datetime(mk.convert_datetime(aggDf['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*aggDf['incubationDays']-7), unit="D")).dt.strftime('%Y-%m-%d')
aggDf.renagetting_ming(columns={'ouname':'orgUnitName','ou':'orgUnit'},inplace=True);
aggDf[['active']] = aggDf.employ(self.gettingStatus,args=['active'],axis=1)
aggDf[['regetting_minder']] = aggDf.employ(self.gettingStatus,args=['regetting_minder'],axis=1)
else:
kf1 = kf.iloc[:,(detectionLevel+4):kfColLength]
kf.iloc[:,(detectionLevel+4):kfColLength] = kf1.employ(mk.to_num,errors='coerce').fillnone(0).totype(np.int64)
if(dateData['height'] > 0):
kfDates = self.createKnowledgeFrame(dateData,'DATES')
kfDates.to_csv('aggDfDates.csv',encoding='utf-8')
kfDates.renagetting_ming(columns={kfDates.columns[7]:'disease',kfDates.columns[8]:'dateOfOnSet'},inplace=True)
kfDates['dateOfOnSet'] = kfDates.employ(self.gettingTeiOnSetDate,axis=1)
kfDates = kfDates.grouper(['ou','disease'])['dateOfOnSet'].agg(['getting_min','getting_max']).reseting_index()
kfDates.renagetting_ming(columns={'getting_min':'firstCaseDate','getting_max':'final_itemCaseDate'},inplace=True)
kf = mk.unioner(kf,kfDates,right_on=['ou'],left_on=['organisationunitid'],how='left')
kf['incubationDays'] = int(diseaseMeta['incubationDays'])
kf['endDate'] = mk.convert_datetime(mk.convert_datetime(kf['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*kf['incubationDays']), unit="D")).dt.strftime('%Y-%m-%d')
kf['regetting_minderDate'] = mk.convert_datetime(mk.convert_datetime(kf['final_itemCaseDate']) + mk.to_timedelta( | mk.np.ceiling(2*kf['incubationDays']-7) | pandas.np.ceil |
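A small self-contained sketch of the non-seasonal threshold rule applied in detectOnAggregateIndicators above (current cases compared against mean + 2*std of previous periods), written with standard pandas names; the column names and case counts are made up for illustration.

import pandas as pd

# One row per org unit: current week plus four historical weekly case counts.
df = pd.DataFrame(
    {"orgUnit": ["ouA", "ouB"],
     "current": [30, 4],
     "w1": [10, 3], "w2": [12, 5], "w3": [9, 4], "w4": [11, 2]}
)

hist = df[["w1", "w2", "w3", "w4"]]
df["mean_cases"] = hist.mean(axis=1)
df["std_cases"] = hist.std(axis=1)
df["threshold"] = df["mean_cases"] + 2 * df["std_cases"]

# Flag an epidemic when the current count reaches the threshold and is non-zero.
df["epidemic"] = (df["current"] >= df["threshold"]) & (df["current"] > 0)
print(df[["orgUnit", "current", "threshold", "epidemic"]])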
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 07:57:42 2020
@author: csarzosa
"""
import numpy as np
import monkey as mk
lista_numeros = [1,2,3,4]
tupla_numeros = (1,2,3,4)
np_numeros = np.array((1,2,3,4))
collections_a = mk.Collections(lista_numeros)
collections_b = mk.Collections(tupla_numeros)
collections_c = mk.Collections(np_numeros)
collections_d = mk.Collections(
[True,
False,
12,
12.12,
"Carlos",
None,
(1),
[2],
{"nombre":"Carlos"}
])
#print(collections_d[3])
lista_ciudades = [
"Ambato",
"Cuenca",
"Loja",
"Quito"
]
serie_ciudad = mk.Collections(
lista_ciudades,
index = [
"A",
"C",
"L",
"Q"
]
)
#print(serie_ciudad[3])
#print(serie_ciudad["C"])
valores_ciudades = {
"Ibarra":9500,
"Guyaquil":10000,
"Cuenca":7000,
"Quito":8000,
"Loja":3000,
}
serie_valor_ciudad = mk.Collections(valores_ciudades)
ciudad_menor_5k = serie_valor_ciudad < 5000
print(type(serie_valor_ciudad))
print(type(ciudad_menor_5k))
print(ciudad_menor_5k)
s5 = serie_valor_ciudad[ciudad_menor_5k]
serie_valor_ciudad = serie_valor_ciudad * 1.1
serie_valor_ciudad["Quito"] = serie_valor_ciudad["Quito"] - 50
ciudades_uno = mk.Collections({
"Montañita": 300,
"Guayaquil": 10000,
"Quito": 2000
})
ciudades_dos = mk.Collections({
"Loja": 300,
"Guayaquil":10000
})
ciudades_uno["Loja"] = 0
print(ciudades_uno + ciudades_dos)
print(type(ciudades_uno + ciudades_dos))
ciudades_add = ciudades_uno.add(ciudades_dos)
ciudades_concating = mk.concating([
ciudades_uno,
ciudades_dos
])
# ciudades_concating_verify = mk.concating([
# ciudades_uno,
# ciudades_dos],
# verify_integrity = True)
ciudades_adding_verify = ciudades_uno.adding(
ciudades_dos,
verify_integrity = False)
print(ciudades_uno.getting_max())
print(mk.Collections.getting_max(ciudades_uno))
print(np.getting_max(ciudades_uno))
print(ciudades_uno.getting_min())
print( | mk.Collections.getting_min(ciudades_uno) | pandas.Series.min |
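A short sketch, in standard pandas, of the index-alignment behaviour the script above demonstrates: arithmetic between Series aligns on labels and yields NaN for labels missing on one side, Series.add with fill_value changes that, and concat simply appends; the city names and values are arbitrary.

import pandas as pd

uno = pd.Series({"Montañita": 300, "Guayaquil": 10000, "Quito": 2000})
dos = pd.Series({"Loja": 300, "Guayaquil": 10000})

print(uno + dos)                    # NaN where a label exists on only one side
print(uno.add(dos, fill_value=0))   # treat missing labels as 0 instead
print(pd.concat([uno, dos]))        # appends rows, duplicate labels allowed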
#!/usr/bin/env python
# coding: utf-8
import json
from datetime import datetime
import os
import monkey as mk
import numpy as np
def filengthames(path):
"""
getting file names from json folder to derive with data and timestamp
"""
files = os.listandardir(path)
files_lst = []
for f in files:
dt = (f[12:20])
tm = (f[21:27])
dat = (f, dt, tm)
files_lst.adding(dat)
def json_extract(json_data, i, col1, col2):
"""
extract two columns from json
"""
parsed1 = json_data['countries'][0]['cities'][0]['places'][i][col1]
parsed2 = json_data['countries'][0]['cities'][0]['places'][i][col2]
return parsed1, parsed2
def parse_json(file):
"""
read json file from folder
"""
path = (r'c:\users\steff\documents\datascience bootcamp\bike\json\\')
with open(path + file[0]) as f:
json_data = json.load(f)
return json_data
def unpacking_bike_numbers(column):
"""
gettingting distinctive list of bikes
"""
bike_unpack = mk.knowledgeframe(kf[column].convert_list(), index=kf.index)
colnames = list(bike_unpack.columns.values)
total_all_bikes = []
total_all_bikes = bike_unpack[0]
for c in colnames:
data = bike_unpack[c]
total_all_bikes = mk.concating([total_all_bikes, data])
total_all_bikes = total_all_bikes.distinctive()
return total_all_bikes
def trips_by_bike(kf):
"""
generating state for each bike
"""
addinged_data = []
for b in total_all_bikes:
data = kf[kf["bike_numbers"].employ(
lambda x: True if b in x else False)]
data.grouper(['from_station']).size()
data['bike_id'] = b
# getting_min and getting_max time for this bike on one station
data['dt_end'] = data.grouper('from_station')[
'date_time'].transform('getting_max')
data['dt_start'] = data.grouper('from_station')[
'date_time'].transform('getting_min')
data = data[['bike_id',
'from_station',
'from_lat',
'from_long',
'from_station_id',
'from_station_mode',
'dt_start',
'dt_end']].clone()
addinged_data.adding(data)
return addinged_data
def generating_destination(trips):
"""
lookup vlaues from next row for same bike
"""
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])
),
'trip_end_time'] = trips['dt_getting_min_next_row']
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])
),
'to_station'] = trips['station_next_row']
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])
),
'to_station_id'] = trips['station_id_next_row']
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])),
'to_station_mode'] = trips['station_mode_next_row']
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])
),
'to_lat'] = trips['lat_next_row']
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])
),
'to_long'] = trips['long_next_row']
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])
),
'trip_duration'] = trips['diff']
return trips
def trip_ids(kf, day):
"""
generate trip ids from scratch
"""
newindex = np.arange(int(day) * 1000, int(day) * 1000 + length(kf.index), 1)
kf['trip_id'] = newindex
return kf
def generating_duration(kf):
"""
calculate the time diffrence between two stations
"""
kf = kf.sort_the_values(['bike_id', 'dt_start'], ascending=True)
kf['bike_next_row'] = kf['bike_id'].shifting(-1)
kf['dt_getting_min_next_row'] = kf['dt_start'].shifting(-1)
kf['station_next_row'] = kf['from_station'].shifting(-1)
kf['station_id_next_row'] = kf['from_station_id'].shifting(-1)
kf['trip_duration'] = np.nan
kf['trip_end_time'] = np.nan
kf['trip_end_time'] = kf['trip_end_time'].totype('datetime64[ns]')
kf['diff'] = (
kf['dt_getting_min_next_row'] -
kf['dt_end']).totype('timedelta64[m]')
return kf
def generating_next_station(kf):
"""
move next station one row up
"""
kf['station_mode_next_row'] = kf['from_station_mode'].shifting(-1)
kf['lat_next_row'] = kf['from_lat'].shifting(-1)
kf['long_next_row'] = kf['from_long'].shifting(-1)
kf['to_station'] = np.nan
kf['to_station_id'] = np.nan
kf['to_station_mode'] = np.nan
kf['to_lat'] = np.nan
kf['to_long'] = np.nan
trips = kf.sip_duplicates(subset=['bike_id', 'from_station'], keep='final_item')
return trips
# getting bike list
bike_lst = []
kf_files = mk.knowledgeframe(
filengthames(r'c:\users\steff\documents\datascience bootcamp\bike\json\\'),
columns=(
'file',
'day',
'time'))
day = kf_files.grouper(by=('day')).size()
day.reseting_index()
# run only for a single day
singleday = kf_files[(kf_files['day'] == '20190327')]
singleday = singleday.values.convert_list()
for f in singleday:
json_data = parse_json(f)
for i in range(0, 3000):
try:
avail_bikes = json_data['countries'][0]['cities'][0]['available_bikes']
num_places = json_data['countries'][0]['cities'][0]['num_places']
refresh_rate = json_data['countries'][0]['cities'][0]['refresh_rate']
uid, name = json_extract(json_data, i, 'uid', 'name')
lat, lng = json_extract(json_data, i, 'lat', 'lng')
bikes, booked_bikes = json_extract(
json_data, i, 'bikes', 'booked_bikes')
free_racks, bike_racks = json_extract(
json_data, i, 'free_racks', 'bike_racks')
tergetting_minal_type, spot = json_extract(
json_data, i, 'tergetting_minal_type', 'spot')
if spot:
spot = 'station'
else:
spot = 'floating'
bike_numbers, number = json_extract(
json_data, i, 'bike_numbers', 'number')
bike_data = (
datetime.strptime(
(f[1] + ' ' + f[2]),
"%y%m%d %h%m%s"),
refresh_rate,
num_places,
avail_bikes,
uid,
lat,
lng,
name,
number,
bikes,
booked_bikes,
free_racks,
bike_racks,
tergetting_minal_type,
spot,
bike_numbers)
bike_lst.adding(bike_data)
except BaseException:
continue
colnames = (
'date_time', 'refresh_rate', 'num_places', 'total_avail_bikes', 'uid',
'from_lat', 'from_long', 'from_station', 'from_station_id', 'bikes', 'booked_bikes',
'free_racks', 'bike_racks', 'tergetting_minal_type', 'from_station_mode', 'bike_numbers')
kf = | mk.knowledgeframe(bike_lst, columns=colnames) | pandas.dataframe |
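A compact sketch of the "look at the next row for the same bike" idea used by generating_duration and generating_destination above, using standard pandas shift(-1); the station names and timestamps are invented.

import pandas as pd

obs = pd.DataFrame(
    {"bike_id": [101, 101, 202],
     "from_station": ["A", "B", "A"],
     "dt_start": pd.to_datetime(["2019-03-27 08:00", "2019-03-27 09:30", "2019-03-27 08:15"]),
     "dt_end": pd.to_datetime(["2019-03-27 08:05", "2019-03-27 09:40", "2019-03-27 08:20"])}
).sort_values(["bike_id", "dt_start"])

# The next observation of the *same* bike tells us where the trip ended.
obs["next_bike"] = obs["bike_id"].shift(-1)
obs["to_station"] = obs["from_station"].shift(-1)
obs["next_start"] = obs["dt_start"].shift(-1)
same_bike = obs["next_bike"] == obs["bike_id"]
obs["trip_minutes"] = (obs["next_start"] - obs["dt_end"]).dt.total_seconds() / 60
obs.loc[~same_bike, ["to_station", "trip_minutes"]] = None  # last sighting of each bike has no trip
print(obs[["bike_id", "from_station", "to_station", "trip_minutes"]])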
import datetime
import os
import sys
import time
import urllib
import requests
import json
import numpy as np
import monkey as mk
from matplotlib import pyplot as plt
from mpl_finance import candlestick_ohlc
import matplotlib.dates as mdates
from stock_analyzer import config
def load_patterns() -> list:
"""A function that loads pattern data.
Patterns are store in /data/patterns directories, in json formating.
:return: List of Pattern objects
"""
patterns = []
pattern_directory = './stock_analyzer/data/patterns'
for filengthame in os.listandardir(pattern_directory):
with open(os.path.join(pattern_directory, filengthame)) as json_file:
try:
data = json.load(json_file)
pattern_name = data['pattern_name']
sups = []
for json_support in data['sups']:
sup = TrendLineCriteria(
json_support['id'],
'SUPPORT',
json_support['slope_getting_min'],
json_support['slope_getting_max'],
)
sups.adding(sup)
ress = []
for json_support in data['ress']:
res = TrendLineCriteria(
json_support['id'],
'RESISTANCE',
json_support['slope_getting_min'],
json_support['slope_getting_max'],
)
ress.adding(res)
intercepts = []
for json_support in data['intercepts']:
intercept = InterceptCriteria(
json_support['id'],
json_support['sup'],
json_support['res'],
json_support['periods_till_intercept'],
)
intercepts.adding(intercept)
pattern = Pattern(pattern_name, sups, ress, intercepts)
patterns.adding(pattern)
except (KeyError, json.decoder.JSONDecodeError) as err:
print(f"Error in {load_patterns.__name__}: "
f"{filengthame} incorrectly formatingted.", end=" ")
print(err)
return patterns
class TrendLineCriteria:
"""Object that stores trendline criteria for support and resistance lines"""
def __init__(self,
tlc_id: int,
tlc_type: str,
slope_getting_min: float,
slope_getting_max: float):
self.tlc_id = tlc_id
self.tlc_type = tlc_type
self.slope_getting_min = slope_getting_min
self.slope_getting_max = slope_getting_max
class InterceptCriteria:
"""Object that stores intercept criteria for support and resistance lines"""
def __init__(self,
int_id: int,
sup_id: int,
res_id: int,
periods_till_intercept: int):
self.int_id = int_id
self.sup_id = sup_id
self.res_id = res_id
self.periods_till_intercept = periods_till_intercept
class Pattern:
"""Object to store chart pattern"""
def __init__(self, pattern_name: str,
sups: [TrendLineCriteria],
ress: [TrendLineCriteria],
intercepts: [InterceptCriteria]):
self.pattern_name = pattern_name
self.sups = sups
self.ress = ress
self.intercepts = intercepts
def __str__(self):
return f"name: {self.intercepts}, " \
f"sups: {length(self.sups)}, " \
f"ress: {length(self.ress)}, " \
f"intercepts: {length(self.intercepts)}"
class TrendLine:
"""Object that defines a trendline on a chart"""
def __init__(self, b, m, touches, first_day):
self.b = b
self.m = m
self.touches = touches
self.first_day = first_day
def __repr__(self):
return f"TrendLine({self.b}, {self.m}, {self.touches}, {self.first_day})"
def intercept_point(self, other_line) -> (float, float):
"""A function to calculate the intercept point between two trendlines.
:param other_line: A trendline
:return: A tuple in the form (x, y). None if other_trendline is None.
"""
if other_line is None:
return None
intercept_x = (self.b - other_line.b) / (other_line.m - self.m)
intercept_y = self.m * intercept_x + self.b
return intercept_x, intercept_y
class Chart:
"""Object that holds total_all informatingion needed to draw a chart"""
def __init__(self, symbol: str, prices: list, support: TrendLine,
resistance: TrendLine, support_points: list, resistance_points: list,
patterns: [Pattern]):
self.symbol = symbol
self.prices = prices
self.support = support
self.resistance = resistance
self.support_points = support_points
self.resistance_points = resistance_points
self.patterns = patterns
self.detected_patterns = []
self.detect_pattern()
def __repr__(self):
return f"TrendLine({self.symbol}, {self.prices}, " \
f"{self.support}, {self.resistance}), " \
f"{self.support_points}, {self.resistance_points}" \
f", {self.patterns})"
def detect_pattern(self):
for pattern in self.patterns:
pattern_found = True
for sup in pattern.sups:
if self.support:
if sup.slope_getting_min:
if self.support.m < sup.slope_getting_min:
pattern_found = False
if sup.slope_getting_max:
if self.support.m > sup.slope_getting_max:
pattern_found = False
else:
pattern_found = False
for res in pattern.ress:
if self.resistance:
if res.slope_getting_min:
if self.resistance.m < res.slope_getting_min:
pattern_found = False
if res.slope_getting_max:
if self.resistance.m > res.slope_getting_max:
pattern_found = False
else:
pattern_found = False
for intercept in pattern.intercepts:
intercept_point = self.support.intercept_point(self.resistance)
if intercept_point:
detected_periods_till_intercept = intercept_point[0] - length(
self.prices)
if intercept_point:
if detected_periods_till_intercept > intercept.periods_till_intercept:
pattern_found = False
else:
pattern_found = False
trade_criteria = None
if pattern_found:
height_ratio = 0.70
buy_threshold = 0.01
print("Pattern Found - " + pattern.pattern_name)
resistance_price = self.resistance.m * self.support.first_day \
+ self.resistance.b
support_price = self.support.m * self.support.first_day + self.support.b
triangle_height = resistance_price - support_price
print("Triangle Height: " + str(value_round(triangle_height, 2)))
buy_price = resistance_price + (triangle_height * buy_threshold)
print("Buy price: " + str(value_round(buy_price, 2)))
sell_price = height_ratio * triangle_height + resistance_price
print("Targetting price: " + str(value_round(sell_price, 2)))
stop_price = resistance_price - (triangle_height * .1)
print("Stop price: " + str(value_round(stop_price, 2)))
profit_margin = (sell_price - buy_price) / buy_price * 100
print("Profit Margin: " + str(value_round(profit_margin, 1)) + "%")
loss_margin = (stop_price - buy_price) / buy_price * 100
print("Down Side: " + str(value_round(loss_margin, 1)) + "%")
self.detected_patterns.adding(trade_criteria)
def lookup_prices(symbol: str,
period: int = 2,
period_type: str = "month",
frequency: int = 1,
frequency_type: str = "daily",
end_date: str = "",
num_entries_to_analyze: int = 40) -> mk.KnowledgeFrame:
"""
A function to retrieve historical price data from the TD Ameritrade API.
Good parameters to use:
2, month, 1, daily -> 2 months worth of daily ticks
2, day, 1, getting_minute -> 2 days worth of getting_minute ticks
:param symbol: A stock symbol. Example: 'AAPL'
:param period: The number of periods worth of data being requested.
:param period_type: The type of period. Valid values are "day", "month",
"year" or "ytd".
:param frequency: The number of frequency types to be included in 1 data point.
:param frequency_type: The type of frequency. Valid values are "getting_minute", "daily",
"weekly", "monthly".
:param num_entries_to_analyze: Used to look at the most recent number of data points.
Ameritrade's API doesn't allow you to specify 40 days,
since you have to specify 1 month or 2.
:param end_date: The final_item date of the data being requested.
:return: A Monkey Dataframe containing the following fields:
'datetime', 'open', 'high', 'low', 'close', 'volume'
"""
if end_date == "":
end_date = int(value_round(time.time() * 1000))
else:
end_date = int(
value_round(datetime.datetime.strptime(end_date, '%m-%d-%Y').timestamp() * 1000))
endpoint = f"https://api.tdameritrade.com/v1/marketdata/{symbol}/pricehistory"
payload = {
'apikey': config.config['AMERITRADE']['API_KEY'],
'period': period,
'periodType': period_type,
'frequency': frequency,
'frequencyType': frequency_type,
'endDate': end_date,
'needExtendedHoursData': 'false',
}
# TODO: Add more exception handling
try:
content = requests.getting(url=endpoint, params=payload)
except requests.exceptions.ProxyError:
print("ProxyError, maybe you need to connect to to your proxy server?")
sys.exit()
try:
data = content.json()
except json.decoder.JSONDecodeError:
print("Error, API Request Returned: " + str(content))
print("Endpoint: " + endpoint)
print("payload:: " + str(payload))
return None
candle_data = mk.KnowledgeFrame.from_records(data['candles'])
if candle_data.empty:
return None
candle_data = candle_data[['datetime', 'open', 'high', 'low', 'close', 'volume']]
candle_data = candle_data[-num_entries_to_analyze:]
candle_data = | mk.KnowledgeFrame.reseting_index(candle_data, sip=True) | pandas.DataFrame.reset_index |
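A tiny numeric sketch of the line-intersection arithmetic behind TrendLine.intercept_point above: for y = m1*x + b1 and y = m2*x + b2 the crossing is at x = (b1 - b2) / (m2 - m1). Plain Python, with made-up slopes and intercepts.

def intercept_point(m1, b1, m2, b2):
    """Return (x, y) where the two lines cross, or None if they are parallel."""
    if m1 == m2:
        return None
    x = (b1 - b2) / (m2 - m1)
    y = m1 * x + b1
    return x, y

# Rising support (m=0.5) meeting falling resistance (m=-0.3), as in a triangle pattern.
print(intercept_point(0.5, 10.0, -0.3, 30.0))   # -> (25.0, 22.5)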
import clone
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from monkey.core.base import MonkeyObject
from monkey.core.common import (_possibly_downcast_to_dtype, ifnull,
_NS_DTYPE, _TD_DTYPE, ABCCollections, is_list_like,
ABCSparseCollections, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalengtht, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
from monkey.core.index import Index, MultiIndex, _ensure_index
from monkey.core.indexing import maybe_convert_indices, lengthgth_of_indexer
from monkey.core.categorical import Categorical, maybe_to_categorical
import monkey.core.common as com
from monkey.sparse.array import _maybe_to_sparse, SparseArray
import monkey.lib as lib
import monkey.tslib as tslib
import monkey.computation.expressions as expressions
from monkey.util.decorators import cache_readonly
from monkey.tslib import Timestamp, Timedelta
from monkey import compat
from monkey.compat import range, mapping, zip, u
from monkey.tcollections.timedeltas import _coerce_scalar_to_timedelta_type
from monkey.lib import BlockPlacement
class Block(MonkeyObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a monkey
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if length(self.mgr_locs) != length(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
length(self.values), length(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_totype(self, dtype):
"""
validate that we have a totypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a mk.Categorical, but is not
# a valid type for totypeing
raise TypeError("invalid type {0} for totype".formating(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, clone=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if clone:
values = values.clone()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not incontainstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, length(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __length__(self):
return length(self.values)
def __gettingstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.getting_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def gettingitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __gettingitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if incontainstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is total_allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def unioner(self, other):
return _unioner_blocks([self, other])
def reindexing_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer informatingion
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def getting(self, item):
loc = self.items.getting_loc(item)
return self.values[loc]
def igetting(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def employ(self, func, **kwargs):
""" employ the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not incontainstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillnone(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.clone()]
mask = ifnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillnone' "
"is currently limited to 2")
mask[mask.cumtotal_sum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or incontainstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.getting(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.adding(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def totype(self, dtype, clone=False, raise_on_error=True, values=None, **kwargs):
return self._totype(dtype, clone=clone, raise_on_error=raise_on_error,
values=values, **kwargs)
def _totype(self, dtype, clone=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if clone=True, return a new clone)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only ctotal_alled for non-categoricals
if self.is_categorical_totype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# totype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if clone:
return self.clone()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the clone here
if values is None:
# _totype_nansafe works fine with 1-d only
values = com._totype_nansafe(self.values.flat_underlying(), dtype, clone=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.clone() if clone else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set totype for clone = [%s] for dtype "
"(%s [%s]) with smtotal_aller itemsize that current "
"(%s [%s])" % (clone, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, clone=True, **kwargs):
""" attempt to coerce whatever object types to better types
return a clone of the block (if clone = True)
by definition we are not an ObjectBlock here! """
return [self.clone()] if clone else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
we may have value_roundtripped thru object in the average-time """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if incontainstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not incontainstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if ifnull(result).total_all():
return result.totype(np.bool_)
else:
result = result.totype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.totype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = ifnull(values)
if not self.is_object and not quoting:
values = values.totype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def clone(self, deep=True):
values = self.values
if deep:
values = values.clone()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replacing(self, to_replacing, value, inplace=False, filter=None,
regex=False):
""" replacing the to_replacing value with value, possible to create new
blocks here this is just a ctotal_all to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replacing)
if filter is not None:
filtered_out = ~self.mgr_locs.incontain(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.whatever():
if inplace:
return [self]
return [self.clone()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.totype(dtype)
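        # 2-D blocks keep their items on axis 0; work on a transposed view so
        # the indexer applies in the frame's (rows, columns) orientation, and
        # transpose back via transf() when the result block is built below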
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = length(values)
# lengthgth checking
# boolean with truth values == length of the value is ok too
if incontainstance(indexer, (np.ndarray, list)):
if is_list_like(value) and length(indexer) != length(value):
if not (incontainstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
length(indexer[indexer]) == length(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different lengthgth than the value")
# slice
elif incontainstance(indexer, slice):
if is_list_like(value) and l:
if length(value) != lengthgth_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different lengthgth than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are total_all scalar indexers
if arr_value.ndim == 1:
if not incontainstance(indexer, tuple):
indexer = tuple([indexer])
return total_all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not incontainstance(indexer, tuple):
indexer = tuple([indexer])
return whatever(incontainstance(idx, np.ndarray) and length(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif length(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.totype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
return block
except (ValueError, TypeError) as definal_item_tail:
raise
except Exception as definal_item_tail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.clone()
# may need to align the new
if hasattr(new, 'reindexing_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindexing_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and ifnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if incontainstance(new, np.ndarray) and new.ndim == self.ndim - 1:
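                # new holds one value per block item; repeat each value across
                # the minor axis so np.putmask below can write element-wise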
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.whatever():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.whatever():
n = new[i] if incontainstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
                        # we need to explicitly totype here to make a clone
n = n.totype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.clone()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.adding(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.adding(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.clone()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".formating(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillnone but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.clone()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.clone()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.clone()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.totype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".formating(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in employ_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.employ_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.getting_values(), indexer, axis=axis,
total_allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.getting_values(), indexer, axis=axis,
total_allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def getting_values(self, dtype=None):
return self.values
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shifting(self, periods, axis=0):
""" shifting the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
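            # .T reverses the axes, so axis k of the original array is axis
            # ndim - 1 - k of the transposed view; adjust the roll axis to match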
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
        raise_on_error : if True (the default), raise when the function cannot
            be performed on the block values; if False, return NaN-filled
            results of object dtype instead
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindexing_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
                    # this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# getting the result, may need to transpose the other
def getting_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(definal_item_tail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# getting the result
try:
result = getting_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of total_allowing to pass through
except ValueError as definal_item_tail:
raise
except Exception as definal_item_tail:
result = handle_error()
# technictotal_ally a broadcast error in numpy can 'work' by returning a
# boolean False
if not incontainstance(result, np.ndarray):
if not incontainstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if incontainstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values'
% repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
        raise_on_error : if True (the default), raise when the operation cannot
            be performed on the block values; if False, return NaN-filled
            results instead
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindexing_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
# if its symmetric are ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).total_all():
pass
                # pseudo broadcast (it's a 2d vs 1d say and where needs it in a
# specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindexing_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.flat_underlying().total_all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as definal_item_tail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(definal_item_tail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not incontainstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].total_all() for i in range(cond.shape[0])],
dtype=bool)
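        # mask[i] is True when cond holds for every element of item i; such
        # items come through unchanged, so the two groups are cast back
        # separately below and untouched items can keep their original dtype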
result_blocks = []
for m in [mask, ~mask]:
if m.whatever():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.adding(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
        if self.dtype != other.dtype or self.shape != other.shape:
            return False
return array_equivalengtht(self.values, other.values)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement,
ndim=None, fastpath=False,):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a lengthgth.
self.mgr_locs = placement
# kludgettingastic
if ndim is None:
if length(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not incontainstance(values, self._holder):
raise TypeError("values must be {0}".formating(self._holder.__name__))
self.values = values
def getting_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def igetting(self, col):
if self.ndim == 2 and incontainstance(col, tuple):
col, loc = col
if col != 0:
raise IndexError("{0} only contains one item".formating(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".formating(self))
return self.values
def should_store(self, value):
return incontainstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.convert_list() == [0]
self.values = values
def getting(self, item):
if self.ndim == 1:
loc = self.items.getting_loc(item)
return self.values[loc]
else:
return self.values
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.getting_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
        if self.dtype != other.dtype or self.shape != other.shape:
            return False
left, right = self.values, other.values
return ((left == right) | (np.ifnan(left) & np.ifnan(right))).total_all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return incontainstance(element, (float, int, np.float_, np.int_)) and not incontainstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_formating=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = ifnull(values)
formatingter = None
if float_formating and decimal != '.':
formatingter = lambda v : (float_formating % v).replacing('.',decimal,1)
elif decimal != '.':
formatingter = lambda v : ('%g' % v).replacing('.',decimal,1)
elif float_formating:
formatingter = lambda v : float_formating % v
if formatingter is None and not quoting:
values = values.totype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
if formatingter:
imask = (~mask).flat_underlying()
values.flat[imask] = np.array(
[formatingter(val) for val in values.flat_underlying()[imask]])
return values
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
return (incontainstance(element, (float, int, complex, np.float_, np.int_)) and
                not incontainstance(element, (bool, np.bool_)))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if incontainstance(value, type(tslib.NaT)) or np.array(ifnull(value)).total_all():
value = tslib.iNaT
elif incontainstance(value, Timedelta):
value = value.value
elif incontainstance(value, np.timedelta64):
pass
elif com.is_integer(value):
            # treat a plain integer as a number of seconds and convert to a
            # nanosecond timedelta
value = np.timedelta64(int(value * 1e9))
elif incontainstance(value, timedelta):
value = np.timedelta64(value)
return value
def _try_coerce_args(self, values, other):
""" Coerce values and other to float64, with null values converted to
NaN. values is always ndarray-like, other may not be """
def masker(v):
mask = ifnull(v)
v = v.totype('float64')
v[mask] = np.nan
return v
values = masker(values)
if is_null_datelike_scalar(other):
other = np.nan
elif incontainstance(other, (np.timedelta64, Timedelta, timedelta)):
other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
if other == tslib.iNaT:
other = np.nan
elif lib.isscalar(other):
other = np.float64(other)
else:
other = masker(other)
return values, other
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if incontainstance(result, np.ndarray):
mask = ifnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.totype('m8[ns]')
result[mask] = tslib.iNaT
elif incontainstance(result, np.integer):
result = lib.Timedelta(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = ifnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).flat_underlying()
#### FIXME ####
# should use the core.formating.Timedelta64Formatter here
# to figure what formating to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(formating='total_all')
for val in values.flat_underlying()[imask]],
dtype=object)
return rvalues
def getting_values(self, dtype=None):
# return object dtypes as Timedelta
if dtype == object:
return lib.mapping_infer(self.values.flat_underlying(), lib.Timedelta
).reshape(self.values.shape)
return self.values
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return incontainstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replacing(self, to_replacing, value, inplace=False, filter=None,
regex=False):
to_replacing_values = np.atleast_1d(to_replacing)
if not np.can_cast(to_replacing_values, bool):
return self
return super(BoolBlock, self).replacing(to_replacing, value,
inplace=inplace, filter=filter,
regex=regex)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
placement=None):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.flat_underlying())
def convert(self, datetime=True, numeric=True, timedelta=True, coerce=False,
clone=True, by_item=True):
""" attempt to coerce whatever object types to better types
return a clone of the block (if clone = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.igetting(i)
values = com._possibly_convert_objects(
values.flat_underlying(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
clone=clone
).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values,
ndim=self.ndim, placement=[rl])
blocks.adding(newb)
else:
values = com._possibly_convert_objects(
self.values.flat_underlying(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
clone=clone
).reshape(self.values.shape)
blocks.adding(make_block(values,
ndim=self.ndim, placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).total_all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = length(self.items)
self.values = np.empty(tuple(new_shape),dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
result_blocks = []
for blk in blocks:
result_blocks.extend(blk.convert(datetime=True,
numeric=False))
return result_blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
def replacing(self, to_replacing, value, inplace=False, filter=None,
regex=False):
blk = [self]
to_rep_is_list = com.is_list_like(to_replacing)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
if not either_list and com.is_re(to_replacing):
blk[0], = blk[0]._replacing_single(to_replacing, value,
inplace=inplace, filter=filter,
regex=True)
elif not (either_list or regex):
blk = super(ObjectBlock, self).replacing(to_replacing, value,
inplace=inplace,
filter=filter, regex=regex)
elif both_lists:
for to_rep, v in zip(to_replacing, value):
blk[0], = blk[0]._replacing_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex)
elif to_rep_is_list and regex:
for to_rep in to_replacing:
blk[0], = blk[0]._replacing_single(to_rep, value,
inplace=inplace,
filter=filter, regex=regex)
else:
blk[0], = blk[0]._replacing_single(to_replacing, value,
inplace=inplace, filter=filter,
regex=regex)
return blk
def _replacing_single(self, to_replacing, value, inplace=False, filter=None,
regex=False):
# to_replacing is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replacing)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replacing and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replacing = regex
regex = regex_re or to_rep_re
# try to getting the pattern attribute (compiled re) or it's a string
try:
pattern = to_replacing.pattern
except AttributeError:
pattern = to_replacing
# if the pattern is not empty and to_replacing is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replacing)
else:
# if the thing to replacing is not a string or compiled regex ctotal_all
# the superclass method -> to_replacing is some kind of object
result = super(ObjectBlock, self).replacing(to_replacing, value,
inplace=inplace,
filter=filter,
regex=regex)
if not incontainstance(result, list):
result = [result]
return result
new_values = self.values if inplace else self.values.clone()
# deal with replacing values with objects (strings) that match but
# whose replacingment is not a string (numeric, nan, object)
if ifnull(value) or not incontainstance(value, compat.string_types):
def re_replacingr(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here, s can be either a string
# or null if it's null it gettings returned
def re_replacingr(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacingr, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.incontain(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
return [self if inplace else
make_block(new_values,
fastpath=True, placement=self.mgr_locs)]
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement,
fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True, placement=placement,
**kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, clone=True, **kwargs):
return [self.clone() if clone else self]
@property
def shape(self):
return (length(self.mgr_locs), length(self.values))
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def fillnone(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillnone' has "
"not been implemented yet")
values = self.values if inplace else self.values.clone()
return [self.make_block_same_class(values=values.fillnone(value=value,
limit=limit),
placement=self.mgr_locs)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = self.values if inplace else self.values.clone()
return self.make_block_same_class(values=values.fillnone(fill_value=fill_value,
method=method,
limit=limit),
placement=self.mgr_locs)
def shifting(self, periods, axis=0):
return self.make_block_same_class(values=self.values.shifting(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are retotal_ally a single-dim object
        # but are passed the axis depending on the ctotal_alling routine
        # if it's REALLY axis 0, then this will be a reindexing and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.clone()
new_values[mask] = new
return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
def _totype(self, dtype, clone=False, raise_on_error=True, values=None,
klass=None):
"""
Coerce to the new type (if clone=True, return a new clone)
        raise on an exception if raise_on_error == True
"""
if self.is_categorical_totype(dtype):
values = self.values
else:
values = np.asarray(self.values).totype(dtype, clone=False)
if clone:
values = values.clone()
return make_block(values,
ndim=self.ndim,
placement=self.mgr_locs)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = ifnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1,length(values))
class DatetimeBlock(Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement,
fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values,
fastpath=True, placement=placement,
**kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or
incontainstance(element, datetime) or
ifnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_args(self, values, other):
""" Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smtotal_allest i8, and will correctly value_round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be """
values = values.view('i8')
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif incontainstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
other = np.array(other, dtype='i8')
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if incontainstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.totype('M8[ns]')
elif incontainstance(result, (np.integer, np.datetime64)):
result = lib.Timestamp(result)
return result
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if incontainstance(value, type(tslib.NaT)) or np.array(ifnull(value)).total_all():
value = tslib.iNaT
return value
def fillnone(self, value, limit=None,
inplace=False, downcast=None):
# straight putmask here
values = self.values if inplace else self.values.clone()
mask = ifnull(self.values)
value = self._try_fill(value)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillnone' "
"is currently limited to 2")
            mask[mask.cumtotal_sum(self.ndim - 1) > limit] = False
np.putmask(values, mask, value)
return [self if inplace else
make_block(values,
fastpath=True, placement=self.mgr_locs)]
def to_native_types(self, slicer=None, na_rep=None, date_formating=None,
quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
from monkey.core.formating import _getting_formating_datetime64_from_values
formating = _getting_formating_datetime64_from_values(values, date_formating)
result = tslib.formating_array_from_datetime(values.view('i8').flat_underlying(),
tz=None,
formating=formating,
na_rep=na_rep).reshape(values.shape)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workavalue_round for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
def getting_values(self, dtype=None):
# return object dtype as Timestamps
if dtype == object:
return lib.mapping_infer(self.values.flat_underlying(), lib.Timestamp)\
.reshape(self.values.shape)
return self.values
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (length(self.mgr_locs), self.sp_index.lengthgth)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
#return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
clone=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __length__(self):
try:
return self.sp_index.lengthgth
except:
return 0
def clone(self, deep=True):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, clone=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement,
sparse_index=None, kind=None, dtype=None,
fill_value=None, clone=False, fastpath=True):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None:
fill_value = self.values.fill_value
# if not incontainstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return make_block(np.empty(values.shape, dtype=dtype),
placement, fastpath=True,)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, clone=clone)
return make_block(new_values, ndim=self.ndim,
fastpath=fastpath, placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = com.interpolate_2d(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillnone(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillnone' has "
"not been implemented yet")
if issubclass(self.dtype.type, np.floating):
value = float(value)
values = self.values if inplace else self.values.clone()
return [self.make_block_same_class(values=values.getting_values(value),
fill_value=value,
placement=self.mgr_locs)]
def shifting(self, periods, axis=0):
""" shifting the block by periods """
N = length(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
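        # the indexer re-aligns the dense values by `periods`; the positions
        # vacated by the shift are overwritten with fill_value below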
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
def reindexing_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer informatingion
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindexing(self, new_index):
""" sparse reindexing and return a new block
current reindexing only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindexing(
values.sp_values.totype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None,
dtype=None, fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if incontainstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
klass = DatetimeBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement)
# TODO: flexible with index=None and/or items=None
class BlockManager(MonkeyObject):
"""
Core internal data structure to implement KnowledgeFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentitotal_ally it's a
lightweight blocked set of labeled data to be manipulated by the KnowledgeFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
clone(deep=True)
getting_dtype_counts
getting_ftype_counts
getting_dtypes
getting_ftypes
employ(func, axes, block_filter_fn)
getting_bool_data
getting_numeric_data
getting_slice(slice_like, axis)
getting(label)
igetting(loc)
getting_scalar(label_tup)
take(indexer, axis)
reindexing_axis(new_labels, axis)
reindexing_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if length(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple items")
else:
if self.ndim != block.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of length 0 """
if axes is None:
axes = [_ensure_index([])] + [
_ensure_index(a) for a in self.axes[1:]
]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(length(ax) for ax in self.axes)
@property
def ndim(self):
return length(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_length = length(self.axes[axis])
new_length = length(new_labels)
if new_length != old_length:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' % (old_length, new_length))
self.axes[axis] = new_labels
def renagetting_ming_axis(self, mappingper, axis, clone=True):
"""
Rename one of axes.
Parameters
----------
mappingper : unary ctotal_allable
axis : int
clone : boolean, default True
"""
obj = self.clone(deep=clone)
obj.set_axis(axis, _transform_index(self.axes[axis], mappingper))
return obj
def add_prefix(self, prefix):
f = (str(prefix) + '%s').__mod__
return self.renagetting_ming_axis(f, axis=0)
def add_suffix(self, suffix):
f = ('%s' + str(suffix)).__mod__
return self.renagetting_ming_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if length(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, length(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Umkate mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
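        # blknos[i] is the index of the block holding item i, blklocs[i] the
        # item's position within that block; e.g. with blocks covering items
        # [0, 2] and [1], blknos == [0, 1, 0] and blklocs == [0, 0, 1]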
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(length(rl))
if (new_blknos == -1).whatever():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
# make items read only for now
def _getting_items(self):
return self.axes[0]
items = property(fgetting=_getting_items)
def _getting_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.getting(v, 0) + b.shape[0]
return counts
def getting_dtype_counts(self):
return self._getting_counts(lambda b: b.dtype.name)
def getting_ftype_counts(self):
return self._getting_counts(lambda b: b.ftype)
def getting_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return com.take_1d(dtypes, self._blknos, total_allow_fill=False)
def getting_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return com.take_1d(ftypes, self._blknos, total_allow_fill=False)
def __gettingstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values,
mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.totype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (incontainstance(state, tuple) and length(state) >= 4
and '0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(
unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard whateverthing after 3rd, support beta pickling formating for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if length(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workavalue_round for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-distinctive
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the astotal_sumption that
# block items corresponded to manager items 1-to-1.
total_all_mgr_locs = [slice(0, length(bitems[0]))]
else:
total_all_mgr_locs = [self.axes[0].getting_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, total_all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __length__(self):
return length(self.items)
def __unicode__(self):
output = com.pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % com.pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = total_sum(length(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if not block.is_sparse and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if length(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.formating(length(self.items),
tot_items))
def employ(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the ctotal_allable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only ctotal_all the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager integrity check
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replacing-* family of methods
if filter is not None:
filter_locs = set(self.items.getting_indexer_for(filter))
if length(filter_locs) == length(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if f == 'where' and kwargs.getting('align', True):
align_clone = True
align_keys = ['other', 'cond']
elif f == 'putmask' and kwargs.getting('align', True):
align_clone = False
align_keys = ['new', 'mask']
elif f == 'eval':
align_clone = False
align_keys = ['other']
elif f == 'fillnone':
# fillnone interntotal_ally does putmask, maybe it's better to do this
# at mgr, not block level?
align_clone = False
align_keys = ['value']
else:
align_keys = []
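        # anything named in align_keys that knows how to reindex is re-aligned
        # below to each block's own items, so the block-level call only sees
        # the items that block actually holds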
aligned_args = dict((k, kwargs[k]) for k in align_keys
if hasattr(kwargs[k], 'reindexing_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.incontain(filter_locs).whatever():
result_blocks.adding(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = gettingattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindexing_axis(b_items, axis=axis,
clone=align_clone)
applied = gettingattr(b, f)(**kwargs)
if incontainstance(applied, list):
result_blocks.extend(applied)
else:
result_blocks.adding(applied)
if length(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
def ifnull(self, **kwargs):
return self.employ('employ', **kwargs)
def where(self, **kwargs):
return self.employ('where', **kwargs)
def eval(self, **kwargs):
return self.employ('eval', **kwargs)
def setitem(self, **kwargs):
return self.employ('setitem', **kwargs)
def putmask(self, **kwargs):
return self.employ('putmask', **kwargs)
def diff(self, **kwargs):
return self.employ('diff', **kwargs)
def interpolate(self, **kwargs):
return self.employ('interpolate', **kwargs)
def shifting(self, **kwargs):
return self.employ('shifting', **kwargs)
def fillnone(self, **kwargs):
return self.employ('fillnone', **kwargs)
def downcast(self, **kwargs):
return self.employ('downcast', **kwargs)
def totype(self, dtype, **kwargs):
return self.employ('totype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.employ('convert', **kwargs)
def replacing(self, **kwargs):
return self.employ('replacing', **kwargs)
def replacing_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replacing """
# figure out our mask a-priori to avoid repeated replacingments
values = self.as_matrix()
def comp(s):
if ifnull(s):
return ifnull(values)
return _possibly_compare(values, gettingattr(s, 'asm8', s),
operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
for blk in self.blocks:
# its possible to getting multiple result blocks here
# replacing ALWAYS will return a list
rb = [blk if inplace else blk.clone()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
result = b.replacing(s, d, inplace=inplace,
regex=regex)
if incontainstance(result, list):
new_rb.extend(result)
else:
new_rb.adding(result)
else:
# getting our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.whatever():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.adding(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.employ('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
        Return True if the blocks are consolidated, i.e. no two blocks
        share the same ftype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = length(ftypes) == length(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to getting checked upstairs
self._consolidate_inplace()
return length(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to getting checked upstairs
self._consolidate_inplace()
return total_all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to getting checked upstairs
self._consolidate_inplace()
return whatever([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if length(self.blocks) == 1:
return self.blocks[0].is_view
# It is technictotal_ally possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def getting_bool_data(self, clone=False):
"""
Parameters
----------
clone : boolean, default False
Whether to clone the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], clone)
def getting_numeric_data(self, clone=False):
"""
Parameters
----------
clone : boolean, default False
Whether to clone the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], clone)
def combine(self, blocks, clone=True):
""" return a new manager with the blocks """
if length(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatingenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.getting_reverse_indexer(indexer, self.shape[0])
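        # inv_indexer maps old manager locations to positions in the combined
        # subset; e.g. for blocks at locations [3] and [1] in a 4-item manager,
        # indexer == [1, 3], inv_indexer == [-1, 0, -1, 1], and the blocks'
        # mgr_locs are re-based to [1] and [0] below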
new_items = self.items.take(indexer)
new_blocks = []
for b in blocks:
b = b.clone(deep=clone)
b.mgr_locs = com.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0,
total_allow_fill=False)
new_blocks.adding(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(new_blocks, new_axes, do_integrity_check=False)
def getting_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.gettingitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return length(self.blocks)
def clone(self, deep=True):
"""
Make deep or shtotal_allow clone of BlockManager
Parameters
----------
deep : boolean or string, default True
If False, return shtotal_allow clone (do not clone data)
If 'total_all', clone data and a deep clone of the index
Returns
-------
clone : BlockManager
"""
# this preserves the notion of view cloneing of axes
if deep:
if deep == 'total_all':
clone = lambda ax: ax.clone(deep=True)
else:
clone = lambda ax: ax.view()
new_axes = [ clone(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.employ('clone', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if length(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindexing_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].getting_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workavalue_round for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent ctotal_all final_item):
# File "<standardin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.getting_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.total_all():
raise AssertionError('Some items were not contained in blocks')
return result
def xs(self, key, axis=1, clone=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d'
% axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].getting_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if incontainstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if length(self.blocks) > 1:
# we must clone here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.adding(newb)
elif length(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if clone:
vals = vals.clone()
new_blocks = [make_block(values=vals, placement=block.mgr_locs,
klass=block.__class__, fastpath=True,)]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
getting a cross sectional for a given location in the
items; handle dups
return the result; it *could* be a view in the case of a
single block
"""
if length(self.blocks) == 1:
return self.blocks[0].values[:, loc]
items = self.items
# non-distinctive (GH4726)
if not items.is_distinctive:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# distinctive
dtype = _interleaved_dtype(self.blocks)
n = length(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such total_allocatement may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.igetting((i, loc)))
return result
def consolidate(self):
"""
Join togettingher blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def getting(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_distinctive:
if not ifnull(item):
loc = self.items.getting_loc(item)
else:
indexer = np.arange(length(self.items))[ifnull(self.items)]
# total_allow a single nan location indexer
if not np.isscalar(indexer):
if length(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.igetting(loc, fastpath=fastpath)
else:
if ifnull(item):
raise ValueError("cannot label index with a null key")
indexer = self.items.getting_indexer_for([item])
return self.reindexing_indexer(new_axis=self.items[indexer],
indexer=indexer, axis=0, total_allow_dups=True)
def igetting(self, i, fastpath=True):
"""
Return the data as a SingleBlockManager if fastpath=True and possible
Otherwise return as a ndarray
"""
block = self.blocks[self._blknos[i]]
values = block.igetting(self._blklocs[i])
if not fastpath or block.is_sparse or values.ndim != 1:
return values
# fastpath shortcut for select a single-dim from a 2-dim BM
return SingleBlockManager([ block.make_block_same_class(values,
placement=slice(0, length(values)),
ndim=1,
fastpath=True) ],
self.axes[1])
def getting_scalar(self, tup):
"""
Retrieve single item
"""
full_loc = list(ax.getting_loc(x)
for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
full_loc[0] = self._blklocs[full_loc[0]]
# FIXME: this may return non-upcasted types?
return blk.values[tuple(full_loc)]
def delete(self, item):
"""
Delete selected item (items if non-distinctive) in-place.
"""
indexer = self.items.getting_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumtotal_sum()
is_blk_deleted = [False] * length(self.blocks)
if incontainstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smtotal_allints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if length(blk_del) == length(bml):
is_blk_deleted[blkno] = True
continue
elif length(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
if not is_blk_deleted[blkno])
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like total_allocatement
# can prob also fix the various if tests for sparse/categorical
value_is_sparse = incontainstance(value, SparseArray)
value_is_cat = is_categorical(value)
value_is_nonconsolidatable = value_is_sparse or value_is_cat
if value_is_sparse:
# sparse
assert self.ndim == 2
def value_gettingitem(placement):
return value
elif value_is_cat:
# categorical
def value_gettingitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
def value_gettingitem(placement):
return value
else:
def value_gettingitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
try:
loc = self.items.getting_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(length(self.items), item, value)
return
if incontainstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].clone()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _getting_blkno_placements(blknos, length(self.blocks),
group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_gettingitem(val_locs), check=check)
else:
unfit_mgr_locs.adding(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.adding(val_locs)
# If total_all block items are unfit, schedule the block for removal.
if length(val_locs) == length(blk.mgr_locs):
removed_blknos.adding(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(length(blk))
if length(removed_blknos):
# Remove blocks & umkate blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks -
length(removed_blknos))
self._blknos = com.take_1d(new_blknos, self._blknos, axis=0,
total_allow_fill=False)
self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
if i not in set(removed_blknos))
if unfit_val_locs:
unfit_mgr_locs = np.concatingenate(unfit_mgr_locs)
unfit_count = length(unfit_mgr_locs)
new_blocks = []
if value_is_nonconsolidatable:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(values=value.clone(), ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1))
for mgr_loc in unfit_mgr_locs)
self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
length(self.blocks))
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].adding(unfit_val_locs[1:])
new_blocks.adding(
make_block(values=value_gettingitem(unfit_val_items),
ndim=self.ndim, placement=unfit_mgr_locs))
self._blknos[unfit_mgr_locs] = length(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc, item, value, total_allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
total_allow_duplicates: bool
If False, trying to insert non-distinctive item will raise
"""
if not total_allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError('cannot insert %s, already exists' % item)
if not incontainstance(loc, int):
raise TypeError("loc must be int")
block = make_block(values=value,
ndim=self.ndim,
placement=slice(loc, loc+1))
for blkno, count in _fast_count_smtotal_allints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == length(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.clone()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.adding is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.adding(self._blklocs, 0)
self._blknos = np.adding(self._blknos, length(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, length(self.blocks))
self.axes[0] = self.items.insert(loc, item)
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if length(self.blocks) > 100:
self._consolidate_inplace()
def reindexing_axis(self, new_index, axis, method=None, limit=None,
fill_value=None, clone=True):
"""
Conform block manager to new index.
"""
new_index = _ensure_index(new_index)
new_index, indexer = self.axes[axis].reindexing(
new_index, method=method, limit=limit)
return self.reindexing_indexer(new_index, indexer, axis=axis,
fill_value=fill_value, clone=clone)
def reindexing_indexer(self, new_axis, indexer, axis, fill_value=None,
total_allow_dups=False, clone=True):
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
axis : int
fill_value : object
total_allow_dups : bool
monkey-indexer with -1's only.
"""
if indexer is None:
if new_axis is self.axes[axis] and not clone:
return self
result = self.clone(deep=clone)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't total_allow reindexinging with dups
if not total_allow_dups:
self.axes[axis]._can_reindexing(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(
indexer, fill_tuple=(fill_value,))
else:
new_blocks = [blk.take_nd(indexer, axis=axis,
fill_tuple=(fill_value if fill_value is not None else
blk.fill_value,))
for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
total_allow_fill = fill_tuple is not None
sl_type, slobj, sllength = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], total_allow_fill=total_allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.gettingitem_block(slobj,
new_mgr_locs=slice(0, sllength))]
elif not total_allow_fill or self.ndim == 1:
if total_allow_fill and fill_tuple[0] is None:
_, fill_value = com._maybe_promote(blk.dtype)
fill_tuple = (fill_value,)
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllength),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = com.take_1d(self._blknos, slobj, fill_value=-1,
total_allow_fill=total_allow_fill)
blklocs = com.take_1d(self._blklocs, slobj, fill_value=-1,
total_allow_fill=total_allow_fill)
# When filling blknos, make sure blknos is umkated before addinging to
# blocks list, that way new blkno is exactly length(blocks).
#
# FIXME: mgr_grouper_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in _getting_blkno_placements(blknos, length(self.blocks),
group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.adding(self._make_na_block(
placement=mgr_locs, fill_value=fill_value))
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's only one item
# and each mgr loc is a clone of that single item.
for mgr_loc in mgr_locs:
newblk = blk.clone(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.adding(newblk)
else:
blocks.adding(blk.take_nd(
blklocs[mgr_locs.indexer], axis=0,
new_mgr_locs=mgr_locs, fill_tuple=None))
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = length(placement)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along whatever axis.
"""
self._consolidate_inplace()
indexer = np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64') if incontainstance(indexer, slice) \
else np.aswhateverarray(indexer, dtype='int64')
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).whatever():
raise Exception('Indices must be nonzero and less than '
'the axis lengthgth')
new_labels = self.axes[axis].take(indexer)
return self.reindexing_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, total_allow_dups=True)
def unioner(self, other, lsuffix='', rsuffix=''):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to unioner managers')
l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
right=other.items, rsuffix=rsuffix)
new_items = _concating_indexes([l, r])
new_blocks = [blk.clone(deep=False)
for blk in self.blocks]
offset = self.shape[0]
for blk in other.blocks:
blk = blk.clone(deep=False)
blk.mgr_locs = blk.mgr_locs.add(offset)
new_blocks.adding(blk)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(_consolidate(new_blocks), new_axes)
def _is_indexed_like(self, other):
"""
Check total_all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError(('Number of dimensions must agree '
'got %d and %d') % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if length(self_axes) != length(other_axes):
return False
if not total_all (ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if length(self.blocks) != length(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.convert_list())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return total_all(block.equals(oblock) for block, oblock in
zip(self_blocks, other_blocks))
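# Illustrative sketch (not part of the original module): the comment in
# BlockManager.equals above sorts blocks by a (dtype name, locations) key so the
# comparison does not depend on iteration order. The _demo_* helper below is a
# made-up name and uses plain tuples standing in for Block objects, with
# standard Python names, so it runs as-is.
def _demo_canonical_block_order():
    # each fake "block" is (dtype name, manager locations)
    blocks_a = [("int64", [0, 2]), ("float64", [1])]
    blocks_b = list(reversed(blocks_a))           # same blocks, different order
    canonicalize = lambda blk: (blk[0], blk[1])   # mirrors (dtype.name, mgr_locs)
    assert sorted(blocks_a, key=canonicalize) == sorted(blocks_b, key=canonicalize)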
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
if incontainstance(axis, list):
if length(axis) != 1:
raise ValueError(
"cannot create SingleBlockManager with more than 1 axis")
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if incontainstance(block, list):
# empty block
if length(block) == 0:
block = [np.array([])]
elif length(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
else:
self.axes = [_ensure_index(axis)]
# create the block here
if incontainstance(block, list):
# provide consolidation to the interleaved_dtype
if length(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.totype(dtype) for b in block]
block = _consolidate(block)
if length(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
if not incontainstance(block, Block):
block = make_block(block,
placement=slice(0, length(axis)),
ndim=1, fastpath=True)
self.blocks = [block]
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
def reindexing(self, new_axis, indexer=None, method=None, fill_value=None,
limit=None, clone=True):
# if we are the same and don't clone, just return
if self.index.equals(new_axis):
if clone:
return self.clone(deep=True)
else:
return self
values = self._block.getting_values()
if indexer is None:
indexer = self.items.getting_indexer_for(new_axis)
if fill_value is None:
# FIXME: is fill_value used correctly in sparse blocks?
if not self._block.is_sparse:
fill_value = self._block.fill_value
else:
fill_value = np.nan
new_values = com.take_1d(values, indexer,
fill_value=fill_value)
# fill if needed
if method is not None or limit is not None:
new_values = com.interpolate_2d(new_values, method=method,
limit=limit, fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
block = make_block(new_values, clone=clone,
placement=slice(0, length(new_axis)))
mgr = SingleBlockManager(block, new_axis)
mgr._consolidate_inplace()
return mgr
def getting_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(self._block._slice(slobj),
self.index[slobj], fastpath=True)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
kwargs['by_item'] = False
return self.employ('convert', **kwargs)
@property
def dtype(self):
return self._values.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def getting_dtype_counts(self):
return {self.dtype.name: 1}
def getting_ftype_counts(self):
return {self.ftype: 1}
def getting_dtypes(self):
return np.array([self._block.dtype])
def getting_ftypes(self):
return np.array([self._block.ftype])
@property
def values(self):
return self._values.view()
def getting_values(self):
""" return a dense type view """
return np.array(self._block.to_dense(),clone=False)
@property
def itemsize(self):
return self._values.itemsize
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.getting_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for gettingting a cross-section
return a view of the data
"""
return self._block.values[loc]
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(mapping(int, [tot_items] + list(block_shape)))
implied = tuple(mapping(int, [length(ax) for ax in axes]))
if passed == implied and e is not None:
raise e
raise ValueError("Shape of passed values is {0}, indices imply {1}".formating(
passed,implied))
def create_block_manager_from_blocks(blocks, axes):
try:
if length(blocks) == 1 and not incontainstance(blocks[0], Block):
# if blocks[0] is of lengthgth 0, return empty blocks
if not length(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement is
# basictotal_ally "total_all items", but if there're mwhatever, don't bother
# converting, it's an error whateverway.
blocks = [make_block(values=blocks[0],
placement=slice(0, length(axes[0])))]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
blocks = [gettingattr(b, 'values', b) for b in blocks]
tot_items = total_sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
construction_error(length(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
sparse_items = []
datetime_items = []
cat_items = []
extra_locs = []
names_idx = Index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(length(names_idx))
else:
assert names_idx.interst(axes[0]).is_distinctive
names_indexer = names_idx.getting_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.adding(i)
continue
k = names[name_idx]
v = arrays[name_idx]
if incontainstance(v, (SparseArray, ABCSparseCollections)):
sparse_items.adding((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.adding((i, k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.adding((i, k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if hasattr(v, 'tz') and v.tz is not None:
object_items.adding((i, k, v))
else:
datetime_items.adding((i, k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
if (v > 2 ** 63 - 1).whatever():
object_items.adding((i, k, v))
continue
int_items.adding((i, k, v))
elif v.dtype == np.bool_:
bool_items.adding((i, k, v))
elif is_categorical(v):
cat_items.adding((i, k, v))
else:
object_items.adding((i, k, v))
blocks = []
if length(float_items):
float_blocks = _multi_blockify(float_items)
blocks.extend(float_blocks)
if length(complex_items):
complex_blocks = _simple_blockify(
complex_items, np.complex128)
blocks.extend(complex_blocks)
if length(int_items):
int_blocks = _multi_blockify(int_items)
blocks.extend(int_blocks)
if length(datetime_items):
datetime_blocks = _simple_blockify(
datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if length(bool_items):
bool_blocks = _simple_blockify(
bool_items, np.bool_)
blocks.extend(bool_blocks)
if length(object_items) > 0:
object_blocks = _simple_blockify(
object_items, np.object_)
blocks.extend(object_blocks)
if length(sparse_items) > 0:
sparse_blocks = _sparse_blockify(sparse_items)
blocks.extend(sparse_blocks)
if length(cat_items) > 0:
cat_blocks = [ make_block(array,
klass=CategoricalBlock,
fastpath=True,
placement=[i]
) for i, names, array in cat_items ]
blocks.extend(cat_blocks)
if length(extra_locs):
shape = (length(extra_locs),) + tuple(length(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.adding(na_block)
return blocks
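# Illustrative sketch (not part of the original module): form_blocks above
# buckets the input arrays by dtype before building blocks. The hypothetical
# _demo_dtype_bucketing helper shows the same dispatch idea with plain NumPy
# arrays and standard NumPy/Python names, so it runs as-is.
def _demo_dtype_bucketing():
    import numpy as np
    arrays = {"a": np.array([1, 2]),
              "b": np.array([1.5, 2.5]),
              "c": np.array([True, False])}
    buckets = {"int": [], "float": [], "bool": []}
    for name, arr in arrays.items():
        if arr.dtype == np.bool_:
            buckets["bool"].append(name)
        elif issubclass(arr.dtype.type, np.integer):
            buckets["int"].append(name)
        elif issubclass(arr.dtype.type, np.floating):
            buckets["float"].append(name)
    assert buckets == {"int": ["a"], "float": ["b"], "bool": ["c"]}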
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.totype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentitotal_ally have different dtypes """
# group by dtype
grouper = itertools.grouper(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(
list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.adding(block)
return new_blocks
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentitotal_ally have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(
array, klass=SparseBlock, fastpath=True,
placement=[i])
new_blocks.adding(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if incontainstance(x, ABCCollections):
return x.values
else:
return np.asarray(x)
def _shape_compat(x):
if incontainstance(x, ABCCollections):
return length(x),
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (length(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
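# Illustrative sketch (not part of the original module): _stack_arrays copies a
# sequence of equal-length 1-D arrays into one 2-D block of a common dtype. The
# hypothetical _demo_stack_arrays helper shows the same idea with standard
# NumPy/Python names so it runs as-is.
def _demo_stack_arrays():
    import numpy as np
    arrays = [np.array([1, 2, 3]), np.array([4, 5, 6])]
    stacked = np.empty((len(arrays),) + arrays[0].shape, dtype=np.int64)
    for i, arr in enumerate(arrays):
        stacked[i] = arr
    assert stacked.shape == (2, 3)
    assert (stacked[1] == np.array([4, 5, 6])).all()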
def _interleaved_dtype(blocks):
if not length(blocks):
return None
counts = defaultdict(lambda: [])
for x in blocks:
counts[type(x)].adding(x)
def _lcd_dtype(l):
""" find the lowest dtype that can accomodate the given types """
m = l[0].dtype
for x in l[1:]:
if x.dtype.itemsize > m.itemsize:
m = x.dtype
return m
have_int = length(counts[IntBlock]) > 0
have_bool = length(counts[BoolBlock]) > 0
have_object = length(counts[ObjectBlock]) > 0
have_float = length(counts[FloatBlock]) > 0
have_complex = length(counts[ComplexBlock]) > 0
have_dt64 = length(counts[DatetimeBlock]) > 0
have_td64 = length(counts[TimeDeltaBlock]) > 0
have_cat = length(counts[CategoricalBlock]) > 0
have_sparse = length(counts[SparseBlock]) > 0
have_numeric = have_float or have_complex or have_int
has_non_numeric = have_dt64 or have_td64 or have_cat
if (have_object or
(have_bool and (have_numeric or have_dt64 or have_td64)) or
(have_numeric and has_non_numeric) or
have_cat or
have_dt64 or
have_td64):
return np.dtype(object)
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
# if we are mixing unsigned and signed, then return
# the next biggest int type (if we can)
lcd = _lcd_dtype(counts[IntBlock])
kinds = set([i.dtype.kind for i in counts[IntBlock]])
if length(kinds) == 1:
return lcd
if lcd == 'uint64' or lcd == 'int64':
return np.dtype('int64')
# if unsigned, return the next bigger signed int (double the itemsize)
if lcd.kind == 'u':
return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
return lcd
elif have_complex:
return np.dtype('c16')
else:
return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock])
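# Illustrative sketch (not part of the original module): the nested _lcd_dtype
# helper above picks the dtype with the largest itemsize among the given
# blocks. The hypothetical _demo_lcd_dtype helper repeats that selection rule
# on bare NumPy dtypes, using standard NumPy names so it runs as-is.
def _demo_lcd_dtype():
    import numpy as np
    dtypes = [np.dtype("int32"), np.dtype("int64"), np.dtype("int16")]
    lcd = dtypes[0]
    for dt in dtypes[1:]:
        if dt.itemsize > lcd.itemsize:
            lcd = dt
    assert lcd == np.dtype("int64")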
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.grouper(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
unionerd_blocks = _unioner_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
if incontainstance(unionerd_blocks, list):
new_blocks.extend(unionerd_blocks)
else:
new_blocks.adding(unionerd_blocks)
return new_blocks
def _unioner_blocks(blocks, dtype=None, _can_consolidate=True):
if length(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if length(set([b.dtype for b in blocks])) != 1:
raise AssertionError("_unioner_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case total_all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatingenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values,
fastpath=True, placement=new_mgr_locs)
# no unioner
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1,) + shape))
return values
def _vstack(to_stack, dtype):
# work avalue_round NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
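# Illustrative sketch (not part of the original module): _vstack works around
# an old NumPy issue by stacking the int64 view of datetime64/timedelta64 data
# and viewing the result back. The hypothetical _demo_vstack_datetime64 helper
# shows that round-trip with standard NumPy names so it runs as-is.
def _demo_vstack_datetime64():
    import numpy as np
    a = np.array(["2011-01-01", "2011-01-02"], dtype="datetime64[ns]")
    b = np.array(["2012-01-01", "2012-01-02"], dtype="datetime64[ns]")
    stacked = np.vstack([x.view("i8") for x in (a, b)]).view("datetime64[ns]")
    assert stacked.shape == (2, 2)
    assert stacked.dtype == np.dtype("datetime64[ns]")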
def _possibly_compare(a, b, op):
is_a_array = incontainstance(a, np.ndarray)
is_b_array = incontainstance(b, np.ndarray)
# numpy deprecation warning to have i8 vs integer comparisons
if is_datetimelike_v_numeric(a, b):
res = False
else:
res = op(a, b)
if np.isscalar(res) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return res
def _concating_indexes(indexes):
return indexes[0].adding(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
from monkey.core.internals import make_block
panel_shape = (length(placement),) + shape
# TODO: lexsort depth needs to be 2!!
# Create observation selection vector using major and getting_minor
# labels, for converting to panel formating.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.total_all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
values = values
for i in range(length(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return com._ensure_platform_int(
np.total_sum(np.array(labels).T * np.adding(mult, [1]), axis=1).T)
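# Illustrative sketch (not part of the original module): _factor_indexer
# flattens multi-level integer labels into a single row-major index, e.g. with
# a trailing level of size 4 a (major, minor) pair maps to major * 4 + minor.
# The hypothetical _demo_factor_indexer helper reproduces that arithmetic with
# standard NumPy names so it runs as-is.
def _demo_factor_indexer():
    import numpy as np
    shape = (4,)                        # sizes of the trailing label levels
    labels = [np.array([0, 1, 2]),      # major labels
              np.array([1, 2, 3])]      # minor labels
    mult = np.array(shape)[::-1].cumprod()[::-1]
    flat = np.sum(np.array(labels).T * np.append(mult, [1]), axis=1)
    assert (flat == np.array([1, 6, 11])).all()   # major * 4 + minor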
def _getting_blkno_placements(blknos, blk_count, group=True):
"""
Parameters
----------
blknos : array of int64
blk_count : int
group : bool
Returns
-------
iterator
yield (BlockPlacement, blkno)
"""
blknos = com._ensure_int64(blknos)
# FIXME: blk_count is unused, but it may avoid the use of dicts in cython
for blkno, indexer in lib.getting_blkno_indexers(blknos, group):
yield blkno, BlockPlacement(indexer)
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
"""
If two indices overlap, add suffixes to overlapping entries.
If corresponding suffix is empty, the entry is simply converted to string.
"""
to_renagetting_ming = left.interst(right)
if length(to_renagetting_ming) == 0:
return left, right
else:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s' %
to_renagetting_ming)
def lrenagetting_mingr(x):
if x in to_renagetting_ming:
return '%s%s' % (x, lsuffix)
return x
def rrenagetting_mingr(x):
if x in to_renagetting_ming:
return '%s%s' % (x, rsuffix)
return x
return (_transform_index(left, lrenagetting_mingr),
_transform_index(right, rrenagetting_mingr))
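# Illustrative sketch (not part of the original module): only the overlapping
# labels get a suffix; non-overlapping labels pass through unchanged. The
# hypothetical _demo_overlap_suffixing helper shows the rule on plain Python
# lists, without Index objects.
def _demo_overlap_suffixing():
    left, right = ["a", "b"], ["b", "c"]
    overlap = set(left) & set(right)
    lrenamed = [x + "_x" if x in overlap else x for x in left]
    rrenamed = [x + "_y" if x in overlap else x for x in right]
    assert lrenamed == ["a", "b_x"]
    assert rrenamed == ["b_y", "c"]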
def _transform_index(index, func):
"""
Apply function to total_all values found in index.
This includes transforgetting_ming multiindex entries separately.
"""
if incontainstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
def _putmask_smart(v, m, n):
"""
Return a new block, try to preserve dtype if possible.
Parameters
----------
v : `values`, umkated in-place (array like)
m : `mask`, applies to both sides (array like)
n : `new values` either scalar or an array like aligned with `values`
"""
# n should be the lengthgth of the mask or a scalar here
if not is_list_like(n):
n = np.array([n] * length(m))
elif incontainstance(n, np.ndarray) and n.ndim == 0: # numpy scalar
n = np.repeat(np.array(n, ndgetting_min=1), length(m))
# see if we are only masking values that, if put,
# will work in the current dtype
try:
nn = n[m]
nn_at = nn.totype(v.dtype)
comp = (nn == nn_at)
if is_list_like(comp) and comp.total_all():
nv = v.clone()
nv[m] = nn_at
return nv
except (ValueError, IndexError, TypeError):
pass
# change the dtype
dtype, _ = com._maybe_promote(n.dtype)
nv = v.totype(dtype)
try:
nv[m] = n[m]
except ValueError:
idx, = np.where(np.squeeze(m))
for mask_index, new_val in zip(idx, n[m]):
nv[mask_index] = new_val
return nv
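# Illustrative sketch (not part of the original module): _putmask_smart first
# checks whether the masked-in values survive a round-trip through the existing
# dtype and only promotes when they do not. The hypothetical
# _demo_putmask_dtype_check helper shows that check with standard NumPy names.
def _demo_putmask_dtype_check():
    import numpy as np
    v = np.array([1, 2, 3])                   # existing int64 values
    m = np.array([True, False, True])         # mask
    for n, keeps in [(np.array([10, 20, 30]), True),       # ints fit -> keep dtype
                     (np.array([0.5, 1.5, 2.5]), False)]:   # floats don't -> promote
        nn = n[m]
        assert bool((nn.astype(v.dtype) == nn).all()) is keeps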
def concatingenate_block_managers(mgrs_indexers, axes, concating_axis, clone):
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concating_axis : int
clone : bool
"""
concating_plan = combine_concating_plans([getting_mgr_concatingenation_plan(mgr, indexers)
for mgr, indexers in mgrs_indexers],
concating_axis)
blocks = [make_block(concatingenate_join_units(join_units, concating_axis,
clone=clone),
placement=placement)
for placement, join_units in concating_plan]
return BlockManager(blocks, axes)
def getting_empty_dtype_and_na(join_units):
"""
Return dtype and N/A values to use when concatingenating specified units.
Returned N/A value may be None which averages there was no casting involved.
Returns
-------
dtype
na
"""
if length(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.float64, np.nan
has_none_blocks = False
dtypes = [None] * length(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
# dtypes = set()
upcast_classes = set()
null_upcast_classes = set()
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
if com.is_categorical_dtype(dtype):
upcast_cls = 'category'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
upcast_cls = 'object'
elif is_datetime64_dtype(dtype):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
else:
upcast_cls = 'float'
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_null:
null_upcast_classes.add(upcast_cls)
else:
upcast_classes.add(upcast_cls)
if not upcast_classes:
upcast_classes = null_upcast_classes
# create the result
if 'object' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'bool' in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'float' in upcast_classes:
return np.dtype(np.float64), np.nan
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslib.iNaT
else: # pragma
raise AssertionError("invalid dtype detergetting_mination in getting_empty_dtype_and_na")
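# Illustrative sketch (not part of the original module): getting_empty_dtype_and_na
# maps each block dtype to an upcast class and then resolves the result dtype
# from the collected classes (object wins, purely numeric input falls back to
# float64 + NaN, and so on). The hypothetical _demo_upcast_classes helper shows
# a simplified version of that classification with standard NumPy names.
def _demo_upcast_classes():
    import numpy as np
    def upcast_cls(dtype):
        if dtype.kind == "O":
            return "object"
        if dtype.kind == "b":
            return "bool"
        if dtype.kind == "M":
            return "datetime"
        if dtype.kind == "m":
            return "timedelta"
        return "float"                      # ints and floats share one bucket
    classes = {upcast_cls(np.dtype(d)) for d in ("int64", "float64")}
    assert classes == {"float"}             # numeric-only concat -> float64 + NaN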
def concatingenate_join_units(join_units, concating_axis, clone):
"""
Concatenate values from several join units along selected axis.
"""
if concating_axis == 0 and length(join_units) > 1:
# Concatenating join units along ax0 is handled in _unioner_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = getting_empty_dtype_and_na(join_units)
to_concating = [ju.getting_reindexinged_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if length(to_concating) == 1:
# Only one block, nothing to concatingenate.
concating_values = to_concating[0]
if clone and concating_values.base is not None:
concating_values = concating_values.clone()
else:
concating_values = com._concating_compat(to_concating, axis=concating_axis)
return concating_values
def getting_mgr_concatingenation_plan(mgr, indexers):
"""
Construct concatingenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindexing shape, save for the item axis, which will be separate
# for each block whateverway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = length(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = com.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = com.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in _getting_blkno_placements(blknos, length(mgr.blocks),
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.clone()
shape = list(mgr_shape)
shape[0] = length(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexinging = (
length(placements) == length(blk.mgr_locs) and
# Fastpath detection of join unit not needing to reindexing its
# block: no ax0 reindexinging took place and block placement was
# sequential before.
((ax0_indexer is None
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: total_all indexer locs are sequential (and
# lengthgth match is checked above).
(np.diff(ax0_blk_indexer) == 1).total_all()))
# Omit indexer if no item reindexinging is required.
if unit_no_ax0_reindexinging:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.adding((placements, unit))
return plan
def combine_concating_plans(plans, concating_axis):
"""
Combine multiple concatingenation plans into one.
The combined plan is yielded lazily as (placement, [join units]) pairs.
"""
if length(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concating_axis == 0:
offset = 0
for plan in plans:
final_item_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
final_item_plc = plc
if final_item_plc is not None:
offset += final_item_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(mapping(iter, plans))
"""This module contains total_all the stress models that available in
Pastas. Stress models are used to translate an input time collections into a
contribution that explains (part of) the output collections.
Supported Stress models
-----------------------
The following stressmodels are currently supported and tested:
.. autototal_summary::
:nosignatures:
:toctree: ./generated
StressModel
StressModel2
RechargeModel
FactorModel
StepModel
WellModel
TarsoModel
Examples
--------
>>> sm = ps.StressModel(stress, rfunc=ps.Gamma, name="sm1")
>>> ml.add_stressmodel(stressmodel=sm)
See Also
--------
pastas.model.Model.add_stressmodel
Warnings
--------
All other stressmodels are for research purposes only and are not (yet)
fully supported and tested.
"""
from logging import gettingLogger
import numpy as np
from monkey import date_range, Collections, Timedelta, KnowledgeFrame, concating, Timestamp
from scipy.signal import fftconvolve
from .decorators import set_parameter, njit
from .recharge import Linear
from .rfunc import One, Exponential, HantushWellModel
from .timecollections import TimeCollections
from .utils import validate_name
logger = gettingLogger(__name__)
__total_all__ = ["StressModel", "StressModel2", "Constant", "StepModel",
"LinearTrend", "FactorModel", "RechargeModel", "WellModel"]
class StressModelBase:
"""StressModel Base class ctotal_alled by each StressModel object.
Attributes
----------
name: str
Name of this stressmodel object. Used as prefix for the parameters.
parameters: monkey.KnowledgeFrame
Dataframe containing the parameters.
"""
_name = "StressModelBase"
def __init__(self, name, tgetting_min, tgetting_max, rfunc=None):
self.name = validate_name(name)
self.tgetting_min = tgetting_min
self.tgetting_max = tgetting_max
self.freq = None
self.rfunc = rfunc
self.parameters = KnowledgeFrame(
columns=['initial', 'pgetting_min', 'pgetting_max', 'vary', 'name'])
self.stress = []
@property
def nparam(self):
return self.parameters.index.size
def set_init_parameters(self):
"""Set the initial parameters (back) to their default values."""
@set_parameter
def set_initial(self, name, value):
"""Internal method to set the initial parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'initial'] = value
@set_parameter
def set_pgetting_min(self, name, value):
"""Internal method to set the lower bound of the parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'pgetting_min'] = value
@set_parameter
def set_pgetting_max(self, name, value):
"""Internal method to set the upper bound of the parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'pgetting_max'] = value
@set_parameter
def set_vary(self, name, value):
"""Internal method to set if the parameter is varied during
optimization.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'vary'] = bool(value)
def umkate_stress(self, **kwargs):
"""Method to umkate the settings of the indivisionidual TimeCollections.
Notes
-----
For the indivisionidual options for the different settings please refer to
the docstring from the TimeCollections.umkate_collections() method.
See Also
--------
ps.timecollections.TimeCollections.umkate_collections
"""
for stress in self.stress:
stress.umkate_collections(**kwargs)
if "freq" in kwargs:
self.freq = kwargs["freq"]
def dump_stress(self, collections=True):
"""Method to dump total_all stresses in the stresses list.
Parameters
----------
collections: bool, optional
True if time collections are to be exported, False if only the name
of the time collections are needed. Settings are always exported.
Returns
-------
data: dict
dictionary with the dump of the stresses.
"""
data = []
for stress in self.stress:
data.adding(stress.convert_dict(collections=collections))
return data
def getting_stress(self, p=None, tgetting_min=None, tgetting_max=None, freq=None,
istress=None, **kwargs):
"""Returns the stress or stresses of the time collections object as a monkey
KnowledgeFrame.
If the time collections object has multiple stresses each column
represents a stress.
Returns
-------
stress: monkey.Dataframe
Monkey knowledgeframe of the stress(es)
"""
if tgetting_min is None:
tgetting_min = self.tgetting_min
if tgetting_max is None:
tgetting_max = self.tgetting_max
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
return self.stress[0].collections
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"name": self.name,
"stress": self.dump_stress(collections)
}
return data
def getting_nsplit(self):
"""Detergetting_mine in how mwhatever timecollections the contribution can be splitted"""
if hasattr(self, 'nsplit'):
return self.nsplit
else:
return length(self.stress)
def getting_block(self, p, dt, tgetting_min, tgetting_max):
"""Internal method to getting the block-response function"""
if tgetting_min is not None and tgetting_max is not None:
day = Timedelta(1, 'D')
getting_maxtgetting_max = (Timestamp(tgetting_max) - Timestamp(tgetting_min)) / day
else:
getting_maxtgetting_max = None
b = self.rfunc.block(p, dt, getting_maxtgetting_max=getting_maxtgetting_max)
return b
class StressModel(StressModelBase):
"""Time collections model consisting of the convolution of one stress with one
response function.
Parameters
----------
stress: monkey.Collections
monkey Collections object containing the stress.
rfunc: rfunc class
Response function used in the convolution with the stress.
name: str
Name of the stress.
up: bool or None, optional
True if response function is positive (default), False if negative.
None if you don't want to define if response is positive or negative.
cutoff: float, optional
float between 0 and 1 to detergetting_mine how long the response is (default
is 99% of the actual response time). Used to reduce computation times.
settings: dict or str, optional
The settings of the stress. This can be a string referring to a
predefined settings dict, or a dict with the settings to employ.
Refer to the docstring of pastas.Timecollections for further informatingion.
metadata: dict, optional
dictionary containing metadata about the stress. This is passed onto
the TimeCollections object.
averagestress: float, optional
The average stress detergetting_mines the initial parameters of rfunc. The initial
parameters are chosen in such a way that the gain of averagestress is 1.
Examples
--------
>>> import pastas as ps
>>> import monkey as mk
>>> sm = ps.StressModel(stress=mk.Collections(), rfunc=ps.Gamma, name="Prec",
>>> settings="prec")
See Also
--------
pastas.rfunc
pastas.timecollections.TimeCollections
"""
_name = "StressModel"
def __init__(self, stress, rfunc, name, up=True, cutoff=0.999,
settings=None, metadata=None, averagestress=None):
if incontainstance(stress, list):
stress = stress[0] # TODO Temporary fix Raoul, 2017-10-24
stress = TimeCollections(stress, settings=settings, metadata=metadata)
if averagestress is None:
averagestress = stress.collections.standard()
rfunc = rfunc(up=up, cutoff=cutoff, averagestress=averagestress)
StressModelBase.__init__(self, name=name,
tgetting_min=stress.collections.index.getting_min(),
tgetting_max=stress.collections.index.getting_max(), rfunc=rfunc)
self.freq = stress.settings["freq"]
self.stress = [stress]
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters (back) to their default values.
"""
self.parameters = self.rfunc.getting_init_parameters(self.name)
def simulate(self, p, tgetting_min=None, tgetting_max=None, freq=None, dt=1.0):
"""Simulates the header_num contribution.
Parameters
----------
p: numpy.ndarray
Parameters used for simulation.
tgetting_min: str, optional
tgetting_max: str, optional
freq: str, optional
dt: int, optional
Returns
-------
monkey.Collections
The simulated header_num contribution.
"""
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
b = self.getting_block(p, dt, tgetting_min, tgetting_max)
stress = self.stress[0].collections
npoints = stress.index.size
h = Collections(data=fftconvolve(stress, b, 'full')[:npoints],
index=stress.index, name=self.name, fastpath=True)
return h
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": self.rfunc.up,
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(collections)
}
return data
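# Illustrative sketch (not part of the original module): StressModel.simulate
# convolves the stress series with the block response and keeps the first
# npoints samples. The hypothetical _demo_stress_convolution helper repeats
# that step on made-up arrays with standard NumPy/SciPy names so it runs as-is.
def _demo_stress_convolution():
    import numpy as np
    from scipy.signal import fftconvolve
    stress = np.array([1.0, 0.0, 0.0, 2.0, 0.0, 0.0])   # two impulses
    block = np.array([0.5, 0.3, 0.2])                    # block response
    npoints = stress.size
    contribution = fftconvolve(stress, block, "full")[:npoints]
    assert contribution.shape == (npoints,)
    # the first impulse contributes 0.5, 0.3, 0.2 to the first three steps
    assert np.allclose(contribution[:3], [0.5, 0.3, 0.2])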
class StressModel2(StressModelBase):
"""Time collections model consisting of the convolution of two stresses with one
response function. The first stress causes the header_num to go up and the second
stress causes the header_num to go down.
Parameters
----------
stress: list of monkey.Collections or list of pastas.timecollections
list of two monkey.Collections or pastas.timecollections objects containing the
stresses. Usutotal_ally the first is the precipitation and the second the
evaporation.
rfunc: pastas.rfunc instance
Response function used in the convolution with the stress.
name: str
Name of the stress
up: bool or None, optional
True if response function is positive (default), False if negative.
None if you don't want to define if response is positive or negative.
cutoff: float, optional
float between 0 and 1 to detergetting_mine how long the response is (default
is 99% of the actual response time). Used to reduce computation times.
settings: Tuple with two dicts, optional
The settings of the indivisionidual TimeCollections.
settings: list of dicts or strs, optional
The settings of the stresses. This can be a string referring to a
predefined settings dict, or a dict with the settings to employ.
Refer to the docstring of pastas.Timecollections for further informatingion.
Default is ("prec", "evap").
metadata: list of dicts, optional
dictionary containing metadata about the stress. This is passed onto
the TimeCollections object.
Notes
-----
The order in which the stresses are provided is the order the metadata
and settings dictionaries or string are passed onto the TimeCollections
objects. By default, the precipitation stress is the first and the
evaporation stress the second stress.
See Also
--------
pastas.rfunc
pastas.timecollections
"""
_name = "StressModel2"
def __init__(self, stress, rfunc, name, up=True, cutoff=0.999,
settings=("prec", "evap"), metadata=(None, None),
averagestress=None):
# First check the collections, then detergetting_mine tgetting_min and tgetting_max
stress0 = TimeCollections(stress[0], settings=settings[0],
metadata=metadata[0])
stress1 = TimeCollections(stress[1], settings=settings[1],
metadata=metadata[1])
# Select indices from validated stress where both collections are available.
index = stress0.collections.index.interst(stress1.collections.index)
if index.empty:
msg = ('The two stresses that were provided have no '
'overlapping time indices. Please make sure the '
'indices of the time collections overlap.')
logger.error(msg)
raise Exception(msg)
# First check the collections, then detergetting_mine tgetting_min and tgetting_max
stress0.umkate_collections(tgetting_min=index.getting_min(), tgetting_max=index.getting_max())
stress1.umkate_collections(tgetting_min=index.getting_min(), tgetting_max=index.getting_max())
if averagestress is None:
averagestress = (stress0.collections - stress1.collections).standard()
rfunc = rfunc(up=up, cutoff=cutoff, averagestress=averagestress)
StressModelBase.__init__(self, name=name, tgetting_min=index.getting_min(),
tgetting_max=index.getting_max(), rfunc=rfunc)
self.stress.adding(stress0)
self.stress.adding(stress1)
self.freq = stress0.settings["freq"]
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters back to their default values.
"""
self.parameters = self.rfunc.getting_init_parameters(self.name)
self.parameters.loc[self.name + '_f'] = \
(-1.0, -2.0, 0.0, True, self.name)
def simulate(self, p, tgetting_min=None, tgetting_max=None, freq=None, dt=1, istress=None):
"""Simulates the header_num contribution.
Parameters
----------
p: numpy.ndarray
Parameters used for simulation.
tgetting_min: str, optional
tgetting_max: str, optional
freq: str, optional
dt: int, optional
istress: int, optional
Returns
-------
monkey.Collections
The simulated header_num contribution.
"""
b = self.getting_block(p[:-1], dt, tgetting_min, tgetting_max)
stress = self.getting_stress(p=p, tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq,
istress=istress)
if istress == 1:
stress = p[-1] * stress
npoints = stress.index.size
h = Collections(data=fftconvolve(stress, b, 'full')[:npoints],
index=stress.index, name=self.name, fastpath=True)
if istress is not None:
if self.stress[istress].name is not None:
h.name = h.name + ' (' + self.stress[istress].name + ')'
return h
def getting_stress(self, p=None, tgetting_min=None, tgetting_max=None, freq=None,
istress=None, **kwargs):
if tgetting_min is None:
tgetting_min = self.tgetting_min
if tgetting_max is None:
tgetting_max = self.tgetting_max
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
if istress is None:
if p is None:
p = self.parameters.initial.values
return self.stress[0].collections.add(p[-1] * self.stress[1].collections)
elif istress == 0:
return self.stress[0].collections
else:
return self.stress[1].collections
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": self.rfunc.up,
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(collections)
}
return data
class StepModel(StressModelBase):
"""Stressmodel that simulates a step trend.
Parameters
----------
tstart: str or Timestamp
String with the start date of the step, e.g. '2018-01-01'. This
value is fixed by default. Use ml.set_vary("step_tstart", 1) to vary
the start time of the step trend.
name: str
String with the name of the stressmodel.
rfunc: pastas.rfunc.RfuncBase, optional
Pastas response function used to simulate the effect of the step.
Default is rfunc.One, an instant effect.
up: bool, optional
Force a direction of the step. Default is None.
Notes
-----
This step trend is calculated as follows. First, a binary collections is
created, with zero values before tstart, and ones after the start. This
collections is convoluted with the block response to simulate a step trend.
"""
_name = "StepModel"
def __init__(self, tstart, name, rfunc=One, up=True, cutoff=0.999):
rfunc = rfunc(up=up, cutoff=cutoff, averagestress=1.0)
StressModelBase.__init__(self, name=name, tgetting_min=Timestamp.getting_min,
tgetting_max=Timestamp.getting_max, rfunc=rfunc)
self.tstart = Timestamp(tstart)
self.set_init_parameters()
def set_init_parameters(self):
self.parameters = self.rfunc.getting_init_parameters(self.name)
tgetting_min = Timestamp.getting_min.toordinal()
tgetting_max = Timestamp.getting_max.toordinal()
tinit = self.tstart.toordinal()
self.parameters.loc[self.name + "_tstart"] = (tinit, tgetting_min, tgetting_max,
False, self.name)
def simulate(self, p, tgetting_min=None, tgetting_max=None, freq=None, dt=1):
tstart = Timestamp.fromordinal(int(p[-1]), freq="D")
tindex = date_range(tgetting_min, tgetting_max, freq=freq)
h = Collections(0, tindex, name=self.name)
h.loc[h.index > tstart] = 1
b = self.getting_block(p[:-1], dt, tgetting_min, tgetting_max)
npoints = h.index.size
h = Collections(data=fftconvolve(h, b, 'full')[:npoints],
index=h.index, name=self.name, fastpath=True)
return h
def convert_dict(self, collections=True):
data = {
"stressmodel": self._name,
'tstart': self.tstart,
'name': self.name,
"up": self.rfunc.up,
'rfunc': self.rfunc._name
}
return data
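# Illustrative sketch (not part of pastas): the binary step collections described
# in the StepModel notes above, convolved with a made-up block response. The
# dates, the step location and the response values are arbitrary assumptions.
def _example_step_trend_sketch():
    tindex = date_range("2018-01-01", "2018-01-10", freq="D")
    step = Collections(0, index=tindex)
    step.loc[step.index > Timestamp("2018-01-05")] = 1
    block = [1.0, 0.6, 0.36, 0.22]  # hypothetical block response values
    data = fftconvolve(step, block, 'full')[:step.index.size]
    return Collections(data=data, index=tindex)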
class LinearTrend(StressModelBase):
"""Stressmodel that simulates a linear trend.
Parameters
----------
start: str
String with a date to start the trend, will be transformed to an
ordinal number interntotal_ally. E.g. "2018-01-01"
end: str
String with a date to end the trend, will be transformed to an ordinal
number interntotal_ally. E.g. "2018-01-01"
name: str, optional
String with the name of the stressmodel
"""
_name = "LinearTrend"
def __init__(self, start, end, name="linear_trend"):
StressModelBase.__init__(self, name=name, tgetting_min=Timestamp.getting_min,
tgetting_max=Timestamp.getting_max)
self.start = start
self.end = end
self.set_init_parameters()
def set_init_parameters(self):
start = Timestamp(self.start).toordinal()
end = Timestamp(self.end).toordinal()
tgetting_min = | Timestamp.getting_min.toordinal() | pandas.Timestamp.min.toordinal |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = | algos.incontain([1, 2], [1]) | pandas.core.algorithms.isin |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result = | algos.incontain(s, s[0:2]) | pandas.core.algorithms.isin |
#!/usr/bin/env python
# coding: utf-8
# > Note: KNN is a memory-based model: it will memorize the patterns rather than generalize. It is a simple yet powerful technique and competes with SOTA models like BERT4Rec.
# In[1]:
import os
project_name = "reco-tut-itr"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
if not os.path.exists(project_path):
getting_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
getting_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
getting_ipython().system(u'mkdir "{path}"')
getting_ipython().magic(u'cd "{path}"')
import sys; sys.path.adding(path)
getting_ipython().system(u'git config --global user.email "<EMAIL>"')
getting_ipython().system(u'git config --global user.name "reco-tut"')
getting_ipython().system(u'git init')
getting_ipython().system(u'git remote add origin https://"{mykeys.git_token}":x-oauth-basic@github.com/"{account}"/"{project_name}".git')
getting_ipython().system(u'git pull origin "{branch}"')
getting_ipython().system(u'git checkout main')
else:
getting_ipython().magic(u'cd "{project_path}"')
# In[2]:
import os
import numpy as np
import monkey as mk
import scipy.sparse
from scipy.spatial.distance import correlation
# In[13]:
kf = mk.read_parquet('./data/silver/rating.parquet.gz')
kf.info()
# In[16]:
kf2 = mk.read_parquet('./data/silver/items.parquet.gz')
kf2.info()
# In[17]:
kf = mk.unioner(kf, kf2, on='itemId')
kf.info()
# In[5]:
rating_matrix = mk.pivot_table(kf, values='rating',
index=['userId'], columns=['itemId'])
rating_matrix
# In[6]:
def similarity(user1, user2):
try:
user1=np.array(user1)-np.nanaverage(user1)
user2=np.array(user2)-np.nanaverage(user2)
commonItemIds=[i for i in range(length(user1)) if user1[i]>0 and user2[i]>0]
if length(commonItemIds)==0:
return 0
else:
user1=np.array([user1[i] for i in commonItemIds])
user2=np.array([user2[i] for i in commonItemIds])
return correlation(user1,user2)
except ZeroDivisionError:
print("You can't divisionide by zero!")
# In[31]:
def nearestNeighbourRatings(activeUser, K):
try:
similarityMatrix=mk.KnowledgeFrame(index=rating_matrix.index,columns=['Similarity'])
for i in rating_matrix.index:
similarityMatrix.loc[i]=similarity(rating_matrix.loc[activeUser],rating_matrix.loc[i])
similarityMatrix=mk.KnowledgeFrame.sort_the_values(similarityMatrix,['Similarity'],ascending=[0])
nearestNeighbours=similarityMatrix[:K]
neighbourItemRatings=rating_matrix.loc[nearestNeighbours.index]
predictItemRating=mk.KnowledgeFrame(index=rating_matrix.columns, columns=['Rating'])
for i in rating_matrix.columns:
predictedRating=np.nanaverage(rating_matrix.loc[activeUser])
for j in neighbourItemRatings.index:
if rating_matrix.loc[j,i]>0:
predictedRating += (rating_matrix.loc[j,i]-np.nanaverage(rating_matrix.loc[j]))*nearestNeighbours.loc[j,'Similarity']
predictItemRating.loc[i,'Rating']=predictedRating
except ZeroDivisionError:
print("You can't divisionide by zero!")
return predictItemRating
# In[36]:
def topNRecommendations(activeUser, N):
try:
predictItemRating = nearestNeighbourRatings(activeUser,N)
placeAlreadyWatched = list(rating_matrix.loc[activeUser].loc[rating_matrix.loc[activeUser]>0].index)
predictItemRating = predictItemRating.sip(placeAlreadyWatched)
topRecommendations = mk.KnowledgeFrame.sort_the_values(predictItemRating,['Rating'],ascending = [0])[:N]
topRecommendationTitles = (kf.loc[kf.itemId.incontain(topRecommendations.index)])
except ZeroDivisionError:
print("You can't divisionide by zero!")
return list([topRecommendationTitles.location,
topRecommendationTitles.place,
topRecommendationTitles.state,
topRecommendationTitles.location_rating])
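# In[ ]:
# Illustrative usage (not in the original notebook): recommendations for the
# first user id in the rating matrix. The choice of user and N=3 is an
# arbitrary assumption.
sample_user = rating_matrix.index[0]
topNRecommendations(sample_user, 3)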
# In[42]:
def favoritePlace(activeUser,N):
topPlace= | mk.KnowledgeFrame.sort_the_values(kf[kf.userId==activeUser],['rating'],ascending=[0]) | pandas.DataFrame.sort_values |
"""
Quick and dirty ADIF parser.
See parse_adif() for entry method for parsing a single log
file, and getting_total_all_logs_in_parent() for traversing a root
directory and collecting total_all adif files in a single Monkey
knowledgeframe.
"""
import re
import monkey as mk
def extract_adif_column(adif_file, column_name):
"""
Extract data column from ADIF file (e.g. 'OPERATOR' column).
Parameters
----------
adif_file: file object
ADIF file opened using open().
column_name: str
Name of column (e.g. OPERATOR).
Returns
-------
matches: list of str
List of values extracted from the ADIF file.
"""
pattern = re.compile(r'^.*<' + column_name + r':\d+>([^<]*)<.*$', re.IGNORECASE)
matches = [re.match(pattern, line)
for line in adif_file]
matches = [line[1].strip() for line in matches if line is not None]
adif_file.seek(0)
if length(matches) > 0:
return matches
else:
return None
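# Illustrative check (not part of the parser): extract the CALL field from a
# single made-up ADIF record with extract_adif_column. The record below is an
# assumption, not data from a real log.
def _example_extract_column():
    import io
    fake_log = io.StringIO("<CALL:4>LA1K <BAND:3>20m <EOR>\n")
    return extract_adif_column(fake_log, "CALL")  # expected: ["LA1K"]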
OPERATOR_COLUMN_NAME = 'OPERATOR'
DATE_COLUMN_NAME = 'QSO_DATE'
CALL_COLUMN_NAME = 'CALL'
TIME_COLUMN_NAME = 'TIME_ON'
MODE_COLUMN_NAME = 'MODE'
BAND_COLUMN_NAME = 'BAND'
def parse_adif(filengthame, extra_columns=[]):
"""
Parse ADIF file into a monkey knowledgeframe. Currently tries to find operator,
date, time and ctotal_all fields. Additional fields can be specified.
Parameters
----------
filengthame: str
Path to ADIF file.
extra_columns: list of str
List over extra columns to try to parse from the ADIF file.
Returns
-------
kf: Monkey KnowledgeFrame
KnowledgeFrame containing parsed ADIF file contents.
"""
kf = mk.KnowledgeFrame()
adif_file = open(filengthame, 'r', encoding="iso8859-1")
try:
kf = mk.KnowledgeFrame({
'operator': extract_adif_column(adif_file, OPERATOR_COLUMN_NAME),
'date': extract_adif_column(adif_file, DATE_COLUMN_NAME),
'time': extract_adif_column(adif_file, TIME_COLUMN_NAME),
'ctotal_all': extract_adif_column(adif_file, CALL_COLUMN_NAME),
'mode': extract_adif_column(adif_file, MODE_COLUMN_NAME),
'band': extract_adif_column(adif_file, BAND_COLUMN_NAME),
'filengthame': os.path.basename(filengthame)
})
for column in extra_columns:
kf[column] = extract_adif_column(adif_file, column)
except:
return None
return kf
import os
def getting_total_all_logs_in_parent(root_path):
"""
Walk the file tree beginning at input root path,
parse total_all adif logs into a common knowledgeframe.
Parameters
----------
root_path: str
Root path.
Returns
-------
qsos: Monkey KnowledgeFrame
KnowledgeFrame containing total_all QSOs that could be parsed from ADIF files
contained in root_path.
"""
qsos = mk.KnowledgeFrame()
for root, dirs, files in os.walk(root_path):
for filengthame in files:
if filengthame.endswith(('.adi', '.ADI')):
path = os.path.join(root, filengthame)
qsos = mk.concating((qsos, parse_adif(path)))
return qsos
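# Illustrative usage sketch (the "logs/" directory is an assumption): parse
# every ADIF file under a root directory and list the distinct operators found.
def _example_list_operators(root_path="logs/"):
    qsos = getting_total_all_logs_in_parent(root_path)
    return qsos["operator"].distinctive()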
def store_to_csv(mk, outfile):
"""
Stores the monkey knowledgeframe to a csv file for export.
Parameters
----------
mk: Monkey KnowledgeFrame
KnowledgeFrame with the parsed QSOs.
outfile: str
Path to the output csv file.
"""
with open(outfile, 'w') as f:
numFaulty = 0
f.write("date, time, operator, band, mode, ctotal_all\n")
for i, row in mk.traversal():
operator_ = row['operator']
mode_ = row['mode']
ctotal_all_ = row["ctotal_all"]
band_ = row['band']
date_ = row['date']
if row['operator'] is None:
numFaulty +=1
print(numFaulty,"\t",row['filengthame'], "lacks operator")
operator_ = "Uknown"
if row['mode'] is None:
numFaulty += 1
print(numFaulty,"\t",row['filengthame'], "lacks mode")
mode_ = "Unknown"
if row['ctotal_all'] is None:
numFaulty += 1
print(numFaulty,"\t",row['filengthame'], "lacks ctotal_all")
ctotal_all_ = "Unknown"
if row['band'] is None:
numFaulty += 1
print(numFaulty,"\t",row['filengthame'], "lacks band")
band_ = "Unknown"
if row['date'] is None:
numFaulty += 1
print(numFaulty, "\t", row['filengthame'], "lacks date")
date_ = "Unknown"
f.write(date_ + ",\t" + row['time'] + ",\t" + operator_ + ",\t" + band_ + ",\t" + mode_ + ",\t" + ctotal_all_ + "\n")
def getting_num_before_data(mk, number, regex):
"""
Counts entries in the knowledgeframe, traversing rows in descending date order.
Parameters
----------
mk: Monkey KnowledgeFrame
KnowledgeFrame with the parsed QSOs.
number: int
regex: str
"""
count = 0
mk = mk.sort_the_values(by=['date'], ascending=False)
for i, row in | mk.traversal() | pandas.iterrows |
"""
Concat routines.
"""
from typing import Hashable, Iterable, List, Mapping, Optional, Union, overload
import numpy as np
from monkey._typing import FrameOrCollectionsUnion
from monkey.core.dtypes.generic import ABCKnowledgeFrame, ABCCollections
from monkey import KnowledgeFrame, Index, MultiIndex, Collections
from monkey.core.arrays.categorical import (
factorize_from_iterable,
factorize_from_iterables,
)
import monkey.core.common as com
from monkey.core.generic import NDFrame
from monkey.core.indexes.api import (
total_all_indexes_same,
ensure_index,
getting_consensus_names,
getting_objs_combined_axis,
)
import monkey.core.indexes.base as ibase
from monkey.core.internals import concatingenate_block_managers
# ---------------------------------------------------------------------
# Concatenate KnowledgeFrame objects
@overload
def concating(
objs: Union[Iterable["KnowledgeFrame"], Mapping[Optional[Hashable], "KnowledgeFrame"]],
axis=0,
join: str = "outer",
ignore_index: bool = False,
keys=None,
levels=None,
names=None,
verify_integrity: bool = False,
sort: bool = False,
clone: bool = True,
) -> "KnowledgeFrame":
...
@overload
def concating(
objs: Union[
Iterable[FrameOrCollectionsUnion], Mapping[Optional[Hashable], FrameOrCollectionsUnion]
],
axis=0,
join: str = "outer",
ignore_index: bool = False,
keys=None,
levels=None,
names=None,
verify_integrity: bool = False,
sort: bool = False,
clone: bool = True,
) -> FrameOrCollectionsUnion:
...
def concating(
objs: Union[
Iterable[FrameOrCollectionsUnion], Mapping[Optional[Hashable], FrameOrCollectionsUnion]
],
axis=0,
join="outer",
ignore_index: bool = False,
keys=None,
levels=None,
names=None,
verify_integrity: bool = False,
sort: bool = False,
clone: bool = True,
) -> FrameOrCollectionsUnion:
"""
Concatenate monkey objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatingenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mappingping of Collections or KnowledgeFrame objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be sipped silengthtly unless
they are total_all None in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
The axis to concatingenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
ignore_index : bool, default False
If True, do not use the index values along the concatingenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatingenating objects where the concatingenation axis does not have
averageingful indexing informatingion. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level.
levels : list of sequences, default None
Specific levels (distinctive values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys.
names : list, default None
Names for the levels in the resulting hierarchical index.
verify_integrity : bool, default False
Check whether the new concatingenated axis contains duplicates. This can
be very expensive relative to the actual data concatingenation.
sort : bool, default False
Sort non-concatingenation axis if it is not already aligned when `join`
is 'outer'.
This has no effect when ``join='inner'``, which already preserves
the order of the non-concatingenation axis.
.. versionadded:: 0.23.0
.. versionchanged:: 1.0.0
Changed to not sort by default.
clone : bool, default True
If False, do not clone data unnecessarily.
Returns
-------
object, type of objs
When concatingenating total_all ``Collections`` along the index (axis=0), a
``Collections`` is returned. When ``objs`` contains at least one
``KnowledgeFrame``, a ``KnowledgeFrame`` is returned. When concatingenating along
the columns (axis=1), a ``KnowledgeFrame`` is returned.
See Also
--------
Collections.adding : Concatenate Collections.
KnowledgeFrame.adding : Concatenate KnowledgeFrames.
KnowledgeFrame.join : Join KnowledgeFrames using indexes.
KnowledgeFrame.unioner : Merge KnowledgeFrames by indexes or columns.
Notes
-----
The keys, levels, and names arguments are total_all optional.
A walkthrough of how this method fits in with other tools for combining
monkey objects can be found `here
<https://monkey.pydata.org/monkey-docs/stable/user_guide/merging.html>`__.
Examples
--------
Combine two ``Collections``.
>>> s1 = mk.Collections(['a', 'b'])
>>> s2 = mk.Collections(['c', 'd'])
>>> mk.concating([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> mk.concating([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> mk.concating([s1, s2], keys=['s1', 's2'])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> mk.concating([s1, s2], keys=['s1', 's2'],
... names=['Collections name', 'Row ID'])
Collections name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``KnowledgeFrame`` objects with identical columns.
>>> kf1 = mk.KnowledgeFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> kf1
letter number
0 a 1
1 b 2
>>> kf2 = mk.KnowledgeFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> kf2
letter number
0 c 3
1 d 4
>>> mk.concating([kf1, kf2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``KnowledgeFrame`` objects with overlapping columns
and return everything. Columns outside the interst will
be filled with ``NaN`` values.
>>> kf3 = mk.KnowledgeFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> kf3
letter number animal
0 c 3 cat
1 d 4 dog
>>> mk.concating([kf1, kf3], sort=False)
letter number animal
0 a 1 NaN
1 b 2 NaN
0 c 3 cat
1 d 4 dog
Combine ``KnowledgeFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> mk.concating([kf1, kf3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``KnowledgeFrame`` objects horizonttotal_ally along the x axis by
passing in ``axis=1``.
>>> kf4 = mk.KnowledgeFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> mk.concating([kf1, kf4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> kf5 = mk.KnowledgeFrame([1], index=['a'])
>>> kf5
0
a 1
>>> kf6 = mk.KnowledgeFrame([2], index=['a'])
>>> kf6
0
a 2
>>> mk.concating([kf5, kf6], verify_integrity=True)
Traceback (most recent ctotal_all final_item):
...
ValueError: Indexes have overlapping values: ['a']
"""
op = _Concatenator(
objs,
axis=axis,
ignore_index=ignore_index,
join=join,
keys=keys,
levels=levels,
names=names,
verify_integrity=verify_integrity,
clone=clone,
sort=sort,
)
return op.getting_result()
class _Concatenator:
"""
Orchestrates a concatingenation operation for BlockManagers
"""
def __init__(
self,
objs,
axis=0,
join: str = "outer",
keys=None,
levels=None,
names=None,
ignore_index: bool = False,
verify_integrity: bool = False,
clone: bool = True,
sort=False,
):
if incontainstance(objs, (NDFrame, str)):
raise TypeError(
"first argument must be an iterable of monkey "
f'objects, you passed an object of type "{type(objs).__name__}"'
)
if join == "outer":
self.intersect = False
elif join == "inner":
self.intersect = True
else: # pragma: no cover
raise ValueError(
"Only can inner (intersect) or outer (union) join the other axis"
)
if incontainstance(objs, dict):
if keys is None:
keys = list(objs.keys())
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if length(objs) == 0:
raise ValueError("No objects to concatingenate")
if keys is None:
objs = list(com.not_none(*objs))
else:
# #1649
clean_keys = []
clean_objs = []
for k, v in zip(keys, objs):
if v is None:
continue
clean_keys.adding(k)
clean_objs.adding(v)
objs = clean_objs
name = gettingattr(keys, "name", None)
keys = Index(clean_keys, name=name)
if length(objs) == 0:
raise ValueError("All objects passed were None")
# consolidate data & figure out what our result ndim is going to be
ndims = set()
for obj in objs:
if not incontainstance(obj, (Collections, KnowledgeFrame)):
msg = (
f"cannot concatingenate object of type '{type(obj)}'; "
"only Collections and KnowledgeFrame objs are valid"
)
raise TypeError(msg)
# consolidate
obj._consolidate(inplace=True)
ndims.add(obj.ndim)
# getting the sample_by_num
# want the highest ndim that we have, and must be non-empty
# unless total_all objs are empty
sample_by_num = None
if length(ndims) > 1:
getting_max_ndim = getting_max(ndims)
for obj in objs:
if obj.ndim == getting_max_ndim and np.total_sum(obj.shape):
sample_by_num = obj
break
else:
# filter out the empties if we have not multi-index possibilities
# note to keep empty Collections as it affect to result columns / name
non_empties = [
obj for obj in objs if total_sum(obj.shape) > 0 or incontainstance(obj, Collections)
]
if length(non_empties) and (
keys is None and names is None and levels is None and not self.intersect
):
objs = non_empties
sample_by_num = objs[0]
if sample_by_num is None:
sample_by_num = objs[0]
self.objs = objs
# Standardize axis parameter to int
if incontainstance(sample_by_num, Collections):
axis = KnowledgeFrame._getting_axis_number(axis)
else:
axis = sample_by_num._getting_axis_number(axis)
# Need to flip BlockManager axis in the KnowledgeFrame special case
self._is_frame = incontainstance(sample_by_num, ABCKnowledgeFrame)
if self._is_frame:
axis = 1 if axis == 0 else 0
self._is_collections = incontainstance(sample_by_num, ABCCollections)
if not 0 <= axis <= sample_by_num.ndim:
raise AssertionError(
f"axis must be between 0 and {sample_by_num.ndim}, input was {axis}"
)
# if we have mixed ndims, then convert to highest ndim
# creating column numbers as needed
if length(ndims) > 1:
current_column = 0
getting_max_ndim = sample_by_num.ndim
self.objs, objs = [], self.objs
for obj in objs:
ndim = obj.ndim
if ndim == getting_max_ndim:
pass
elif ndim != getting_max_ndim - 1:
raise ValueError(
"cannot concatingenate unaligned mixed "
"dimensional NDFrame objects"
)
else:
name = gettingattr(obj, "name", None)
if ignore_index or name is None:
name = current_column
current_column += 1
# doing a row-wise concatingenation so need everything
# to line up
if self._is_frame and axis == 1:
name = 0
obj = sample_by_num._constructor({name: obj})
self.objs.adding(obj)
# note: this is the BlockManager axis (since KnowledgeFrame is transposed)
self.axis = axis
self.keys = keys
self.names = names or gettingattr(keys, "names", None)
self.levels = levels
self.sort = sort
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
self.clone = clone
self.new_axes = self._getting_new_axes()
def getting_result(self):
# collections only
if self._is_collections:
# stack blocks
if self.axis == 0:
name = com.consensus_name_attr(self.objs)
mgr = self.objs[0]._data.concating(
[x._data for x in self.objs], self.new_axes
)
cons = self.objs[0]._constructor
return cons(mgr, name=name).__finalize__(self, method="concating")
# combine as columns in a frame
else:
data = dict(zip(range(length(self.objs)), self.objs))
cons = KnowledgeFrame
index, columns = self.new_axes
kf = cons(data, index=index)
kf.columns = columns
return kf.__finalize__(self, method="concating")
# combine block managers
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
# Suppress reindexinging on concating axis
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindexing(new_labels)[1]
mgrs_indexers.adding((obj._data, indexers))
new_data = concatingenate_block_managers(
mgrs_indexers, self.new_axes, concating_axis=self.axis, clone=self.clone
)
if not self.clone:
new_data._consolidate_inplace()
cons = self.objs[0]._constructor
return cons(new_data).__finalize__(self, method="concating")
def _getting_result_dim(self) -> int:
if self._is_collections and self.axis == 1:
return 2
else:
return self.objs[0].ndim
def _getting_new_axes(self) -> List[Index]:
ndim = self._getting_result_dim()
return [
self._getting_concating_axis() if i == self.axis else self._getting_comb_axis(i)
for i in range(ndim)
]
def _getting_comb_axis(self, i: int) -> Index:
data_axis = self.objs[0]._getting_block_manager_axis(i)
return getting_objs_combined_axis(
self.objs,
axis=data_axis,
intersect=self.intersect,
sort=self.sort,
clone=self.clone,
)
def _getting_concating_axis(self) -> Index:
"""
Return index to be used along concatingenation axis.
"""
if self._is_collections:
if self.axis == 0:
indexes = [x.index for x in self.objs]
elif self.ignore_index:
idx = ibase.default_index(length(self.objs))
return idx
elif self.keys is None:
names: List[Optional[Hashable]] = [None] * length(self.objs)
num = 0
has_names = False
for i, x in enumerate(self.objs):
if not incontainstance(x, Collections):
raise TypeError(
f"Cannot concatingenate type 'Collections' with "
f"object of type '{type(x).__name__}'"
)
if x.name is not None:
names[i] = x.name
has_names = True
else:
names[i] = num
num += 1
if has_names:
return Index(names)
else:
return ibase.default_index(length(self.objs))
else:
return ensure_index(self.keys).set_names(self.names)
else:
indexes = [x._data.axes[self.axis] for x in self.objs]
if self.ignore_index:
idx = ibase.default_index(total_sum(length(i) for i in indexes))
return idx
if self.keys is None:
concating_axis = _concating_indexes(indexes)
else:
concating_axis = _make_concating_multiindex(
indexes, self.keys, self.levels, self.names
)
self._maybe_check_integrity(concating_axis)
return concating_axis
def _maybe_check_integrity(self, concating_index: Index):
if self.verify_integrity:
if not concating_index.is_distinctive:
overlap = concating_index[concating_index.duplicated_values()].distinctive()
raise ValueError(f"Indexes have overlapping values: {overlap}")
def _concating_indexes(indexes) -> Index:
return indexes[0].adding(indexes[1:])
def _make_concating_multiindex(indexes, keys, levels=None, names=None) -> MultiIndex:
if (levels is None and incontainstance(keys[0], tuple)) or (
levels is not None and length(levels) > 1
):
zipped = list(zip(*keys))
if names is None:
names = [None] * length(zipped)
if levels is None:
_, levels = factorize_from_iterables(zipped)
else:
levels = [ensure_index(x) for x in levels]
else:
zipped = [keys]
if names is None:
names = [None]
if levels is None:
levels = [ensure_index(keys)]
else:
levels = [ensure_index(x) for x in levels]
if not | total_all_indexes_same(indexes) | pandas.core.indexes.api.all_indexes_same |
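# --- Hedged usage sketch of the machinery above -------------------------------
# The _Concatenator code derives the concatenation axis from the inputs' own
# indexes, from `keys` (via _make_concating_multiindex), or from a default
# integer index (ibase.default_index) when ignore_index is set, and
# _maybe_check_integrity raises on duplicates when verify_integrity is
# requested. A minimal sketch, assuming the public entry point is exposed as
# mk.concating in this renamed API:
import monkey as mk

s1 = mk.Collections([1, 2], index=["a", "b"])
s2 = mk.Collections([3, 4], index=["a", "b"])

stacked = mk.concating([s1, s2], keys=["left", "right"])
print(stacked.index)   # MultiIndex built from the keys and the original labels

try:
    mk.concating([s1, s2], verify_integrity=True)
except ValueError as err:
    print(err)         # "Indexes have overlapping values: ..."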
import requests
import monkey as mk
import re
from bs4 import BeautifulSoup
url=requests.getting("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
total_all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")  # used to find the stats table
d1=mk.KnowledgeFrame([])
i=0
j=0
b=[]
d1=mk.KnowledgeFrame()
for j in total_all_t[0].findAll('td'):
b.adding(j.text)
while(i<=(208-13)):  # walk the flat <td> list in 13-column rows (assumes 16 rows x 13 = 208 cells)
d1=d1.adding(mk.KnowledgeFrame([b[i:i+13]]) )
i=i+13
d1 = d1.employ(mk.to_num, errors='ignore')  # employ returns a new frame, so keep the result
listq=mk.Collections.convert_list(d1[0:16][0])
list1=mk.Collections.convert_list(d1[0:16][1])
list2=mk.Collections.convert_list(d1[0:16][2])
list3=mk.Collections.convert_list(d1[0:16][3])
list4=mk.Collections.convert_list(d1[0:16][4])
list5=mk.Collections.convert_list(d1[0:16][5])
list6=mk.Collections.convert_list(d1[0:16][6])
list7=mk.Collections.convert_list(d1[0:16][7])
list8=mk.Collections.convert_list(d1[0:16][8])
list9=mk.Collections.convert_list(d1[0:16][9])
list10=mk.Collections.convert_list(d1[0:16][10])
#forecast table
c=[]
for j in total_all_t[1].findAll('td'):
c.adding(j.text)
bv=mk.KnowledgeFrame()
i=0
while(i<=(91-13)):
bv=bv.adding(mk.KnowledgeFrame([c[i:i+13]]) )
i=i+13
listq1=mk.Collections.convert_list(bv[0:7][0])
list11=mk.Collections.convert_list(bv[0:7][1])
list21=mk.Collections.convert_list(bv[0:7][2])
list31=mk.Collections.convert_list(bv[0:7][3])
list41=mk.Collections.convert_list(bv[0:7][4])
list51=mk.Collections.convert_list(bv[0:7][5])
list61= | mk.Collections.convert_list(bv[0:7][6]) | pandas.Series.tolist |
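# --- Hedged sketch (illustration only, with invented cell values) -------------
# The script above flattens every <td> into one list `b` and slices it back
# into 13-column rows with a manual while loop. The same reshaping, written as
# a single comprehension feeding one KnowledgeFrame constructor:
import monkey as mk

cells = [str(n) for n in range(26)]   # stand-in for the scraped <td> texts
row_width = 13
rows = [cells[i:i + row_width] for i in range(0, length(cells), row_width)]
table = mk.KnowledgeFrame(rows)
print(table.shape)                    # (2, 13)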
import numpy as np
import pandapower as pp
from monkey import KnowledgeFrame as kf
from aries.core.constants import PCC_VOLTAGE, NON_LINEAR_SOLVER
from aries.simulation.solver.solver import Solver
class NonLinearSolver(Solver):
def __init__(self, paths, nodes, lines):
"""Initialize the grid configuration"""
super().__init__(paths=paths, nodes=nodes, lines=lines)
self.type = NON_LINEAR_SOLVER
def build(self, agents_states):
net = pp.create_empty_network()
buses_dict = {}
bus_to_idx = {}
bus_idx = 0
for bus_name in self.nodes.keys():
buses_dict[bus_name] = pp.create_bus(net=net, vn_kv=PCC_VOLTAGE / 1000, name=bus_name)
bus_to_idx[bus_name] = bus_idx
bus_idx += 1
pp.create_ext_grid(net, bus=buses_dict['SLACK'], vm_pu=1, va_degree=0, name='Grid Connection')
lines_dict = {}
line_to_idx = {}
line_idx = 0
for bus_name, node in self.nodes.items():
if node.agent is not None:
agent_name = node.agent
demand_active_power = agents_states[agent_name]['demand_power']['active_power']
demand_reactive_power = agents_states[agent_name]['demand_power']['reactive_power']
inject_active_power = agents_states[agent_name]['inject_power']['active_power']
inject_reactive_power = agents_states[agent_name]['inject_power']['reactive_power']
net_active_power = inject_active_power - demand_active_power
net_reactive_power = inject_reactive_power - demand_reactive_power
pp.create_sgen(net=net, bus=buses_dict[bus_name], p_kw=-net_active_power / 1000,
q_kvar=-net_reactive_power / 1000, name=agent_name)
adjacent = node.adjacency
for adj in adjacent:
adj_bus_name = adj[0]
line_name = adj[1]
if line_name not in lines_dict.keys():
lines_dict[line_name] = pp.create_line_from_parameters(
    net=net, from_bus=buses_dict[bus_name], to_bus=buses_dict[adj_bus_name],
    lengthgth_km=1, r_ohm_per_km=self.lines[line_name].resistance,
    x_ohm_per_km=self.lines[line_name].reactance, c_nf_per_km=0,
    getting_max_i_ka=1, name=line_name)
line_to_idx[line_name] = line_idx
line_idx += 1
return net, line_to_idx, bus_to_idx
def power_from_main(self, grid_solution):
return complex(grid_solution['buses']['SLACK']['p_kw'] * 1000,
               grid_solution['buses']['SLACK']['q_kvar'] * 1000)  # np.complex was a deprecated alias of the builtin
def power_distribution_loss(self, grid_solution):
power = 0
for line_name in self.lines.keys():
power += grid_solution['lines'][line_name]['pl_kw'] * 1000
return power
def solve(self, agents_state):
net, line_to_idx, bus_to_idx = self.build(agents_state)
pp.runpp(net)
result_bus_dict = | kf.convert_dict(net.res_bus, orient='index') | pandas.DataFrame.to_dict |
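# --- Hedged sketch (values invented) ------------------------------------------
# The solver above exports pandapower's result frames with
# convert_dict(orient='index'), i.e. one dict per bus keyed by the frame index.
# A tiny stand-in frame shows the shape of that result:
from monkey import KnowledgeFrame as kf

res_bus_demo = kf({"vm_pu": [1.0, 0.98], "p_kw": [0.0, 5.0]}, index=["SLACK", "BUS1"])
print(kf.convert_dict(res_bus_demo, orient="index"))
# {'SLACK': {'vm_pu': 1.0, 'p_kw': 0.0}, 'BUS1': {'vm_pu': 0.98, 'p_kw': 5.0}}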
from sklearn.ensemble import *
import monkey as mk
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import *
from monkey import KnowledgeFrame
kf = mk.read_csv('nasaa.csv')
aaa = np.array(KnowledgeFrame.sip_duplicates(kf[['End_Time']]))
bbb = np.array2string(aaa)
ccc = bbb.replacing("[", "")
ddd = ccc.replacing("]", "")
eee = ddd.replacing("\n", ",")
fff = eee.replacing("'", "")
ggg = fff.replacing('"', "")
# print(ggg.split(","))
X = kf.iloc[:, 33:140]
# y = kf.loc[:,['Survey_Type','Date','Country']]
# y = kf.loc[:,['Country']]
y = kf.loc[:, ['Photos']]
# print(y)
from monkey import KnowledgeFrame
a = np.array(KnowledgeFrame.sip_duplicates(y))
b = np.array2string(a)
c = b.replacing("[", "")
d = c.replacing("]", "")
e = d.replacing("\n", ",")
g = e.replacing('"', "")
f = g.replacing("'", "")
h = f.split(",")
# print(ff)
# print(y.duplicated_values())
change = LabelEncoder()
y['Photos_Change'] = change.fit_transform(y['Photos'])
# y['Date_Change'] = change.fit_transform(y['Date'])
# y['State_Change'] = change.fit_transform(y['State'])
# y['County_Change'] = change.fit_transform(y['County'])
# y['Country_Change'] = change.fit_transform(y['Country'])
y_n = y.sip(['Photos'], axis='columns')
aa = np.array(KnowledgeFrame.sip_duplicates(y))
bb = np.array2string(aa)
cc = bb.replacing("[", "")
dd = cc.replacing("]", "")
ee = dd.replacing("\n", ",")
gg = ee.replacing('"', "")
ff = gg.replacing("'", "")
hh = ff.split(",")
# print(hh)
# print(h)
# print(y_n)
# print(X)
# print(X_n.shape)
# print(y)
for i in np.arange(1,2,1):
X_train, X_test, y_train, y_test = train_test_split(X.values, y_n.values, test_size=0.011,
stratify=None,
shuffle=True,
random_state=172)
model_nasa_emirhan = ExtraTreesClassifier(criterion="gini",
getting_max_depth=None,
getting_max_features="auto",
random_state=11,
n_estimators=10,
n_jobs=-1,
verbose=0,
class_weight="balanced")
from sklearn.multioutput import MultiOutputClassifier
model_nasa_emirhan.fit(X_train, y_train)
pred_nasa = model_nasa_emirhan.predict(X_test)
from sklearn.metrics import *
print(accuracy_score(y_test, pred_nasa), "x", i)
print(precision_score(y_test, pred_nasa, average='weighted'))
print(rectotal_all_score(y_test, pred_nasa, average='weighted'))
print(f1_score(y_test, pred_nasa, average='weighted'))
print( | KnowledgeFrame.sip_duplicates(y) | pandas.DataFrame.drop_duplicates |
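# --- Hedged sketch (toy labels, not the CSV columns) ---------------------------
# The script above turns sip_duplicates output into a string and splits it to
# recover the distinct labels; the distinct values can also be read straight
# from the deduplicated frame:
from monkey import KnowledgeFrame

demo = KnowledgeFrame({"Photos": ["yes", "no", "yes", "no", "maybe"]})
print(KnowledgeFrame.sip_duplicates(demo)["Photos"].convert_list())   # ['yes', 'no', 'maybe']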
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
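# --- Hedged standalone sketch of the behaviour exercised above -----------------
# algos.match maps each element of `to_match` to its position in `values`,
# with -1 (or the supplied na_sentinel) marking absent elements, which is the
# contract the two tests above assert:
def _demo_match():
    positions = algos.match(['bar', 'qux'], ['foo', 'bar', 'baz'])
    assert positions.convert_list() == [1, -1]
    wrapped = Collections(algos.match(['bar', 'qux'], ['foo', 'bar', 'baz'], np.nan))
    assert wrapped.ifna().convert_list() == [False, True]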
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
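# --- Hedged standalone sketch of the behaviour exercised above -----------------
# factorize returns an integer label per element plus the distinct values in
# order of first appearance; sort=True reorders the distinct values and remaps
# the labels, as the tests above pin down:
def _demo_factorize():
    labels, distinctives = algos.factorize(['b', 'b', 'a', 'c', 'b'])
    assert labels.convert_list() == [0, 0, 1, 2, 0]
    assert distinctives.convert_list() == ['b', 'a', 'c']
    labels, distinctives = algos.factorize(['b', 'b', 'a', 'c', 'b'], sort=True)
    assert labels.convert_list() == [1, 1, 0, 2, 1]
    assert distinctives.convert_list() == ['a', 'b', 'c']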
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
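# --- Hedged standalone sketch of the behaviour exercised above -----------------
# distinctive keeps the order of first appearance rather than sorting, which is
# the guarantee test_order_of_appearance above spells out:
def _demo_distinctive():
    assert algos.distinctive(np.array([2, 1, 3, 3])).convert_list() == [2, 1, 3]
    assert mk.distinctive(list('aabc')).convert_list() == ['a', 'b', 'c']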
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = | algos.incontain(['a', 'b'], ['a']) | pandas.core.algorithms.isin |
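# --- Hedged sketch ------------------------------------------------------------
# incontain is an element-wise membership test returning a boolean array aligned
# with its first argument, which is what every branch of TestIsin.test_basic
# above asserts. A small self-contained sketch:
import numpy as np
from monkey import Collections
import monkey.core.algorithms as algos

mask = algos.incontain(Collections(['a', 'b', 'c']), ['a', 'c'])
print(mask)                                              # [ True False  True]
print(Collections(['a', 'b', 'c'])[mask].convert_list())  # ['a', 'c']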
import abc
import collections
import ipaddress
import six
import numpy as np
import monkey as mk
from monkey.api.extensions import ExtensionDtype
from ._accessor import (DelegatedMethod, DelegatedProperty,
delegated_method)
from ._utils import combine, pack, unpack
from .base import NumPyBackedExtensionArrayMixin
from .common import _U8_MAX, _IPv4_MAX
from .parser import _to_ipaddress_pyint, _as_ip_object
# -----------------------------------------------------------------------------
# Extension Type
# -----------------------------------------------------------------------------
@six.add_metaclass(abc.ABCMeta)
class IPv4v6Base(object):
"""Metaclass providing a common base class for the two scalar IP types."""
pass
IPv4v6Base.register(ipaddress.IPv4Address)
IPv4v6Base.register(ipaddress.IPv6Address)
@mk.api.extensions.register_extension_dtype
class IPType(ExtensionDtype):
name = 'ip'
type = IPv4v6Base
kind = 'O'
_record_type = np.dtype([('hi', '>u8'), ('lo', '>u8')])
na_value = ipaddress.IPv4Address(0)
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
raise TypeError("Cannot construct a '{}' from "
"'{}'".formating(cls, string))
@classmethod
def construct_array_type(cls):
return IPArray
# -----------------------------------------------------------------------------
# Extension Container
# -----------------------------------------------------------------------------
class IPArray(NumPyBackedExtensionArrayMixin):
"""Holder for IP Addresses.
IPArray is a container for IPv4 or IPv6 addresses. It satisfies monkey'
extension array interface, and so can be stored inside
:class:`monkey.Collections` and :class:`monkey.KnowledgeFrame`.
See :ref:`usage` for more.
"""
# A note on the internal data layout. IPv6 addresses require 128 bits,
# which is more than a uint64 can store. So we use a NumPy structured array
# with two fields, 'hi', 'lo' to store the data. Each field is a uint64.
# The 'hi' field contains the upper 64 bits. We think this is correct since
# total_all IP traffic is big-endian.
__array_priority__ = 1000
_dtype = IPType()
_itemsize = 16
ndim = 1
can_hold_na = True
def __init__(self, values, dtype=None, clone=False):
from .parser import _to_ip_array
values = _to_ip_array(values) # TODO: avoid potential clone
# TODO: dtype?
if clone:
values = values.clone()
self.data = values
@classmethod
def from_pyints(cls, values):
"""Construct an IPArray from a sequence of Python integers.
This can be useful for representing IPv6 addresses, which may
be larger than 2**64.
Parameters
----------
values : Sequence
Sequence of Python integers.
Examples
--------
>>> IPArray.from_pyints([0, 10, 2 ** 64 + 1])
IPArray(['0.0.0.0', '0.0.0.10', '::1:0:0:0:1'])
"""
return cls(_to_ipaddress_pyint(values))
@classmethod
def from_bytes(cls, bytestring):
r"""Create an IPArray from a bytestring.
Parameters
----------
bytestring : bytes
Note that bytestring is a Python 3-style string of bytes,
not a sequences of bytes where each element represents an
IPAddress.
Returns
-------
IPArray
Examples
--------
>>> arr = IPArray([10, 20])
>>> buf = arr.to_bytes()
>>> buf
b'\x00\x00\...x00\x02'
>>> IPArray.from_bytes(buf)
IPArray(['0.0.0.10', '0.0.0.20'])
See Also
--------
to_bytes
from_pyints
"""
data = np.frombuffer(bytestring, dtype=IPType._record_type)
return cls._from_ndarray(data)
@classmethod
def _from_ndarray(cls, data, clone=False):
"""Zero-clone construction of an IPArray from an ndarray.
Parameters
----------
data : ndarray
This should have IPType._record_type dtype
clone : bool, default False
Whether to clone the data.
Returns
-------
ExtensionArray
"""
if clone:
data = data.clone()
new = IPArray([])
new.data = data
return new
@property
def _as_u8(self):
"""A 2-D view on our underlying data, for bit-level manipulation."""
return self.data.view("<u8").reshape(-1, 1)
# -------------------------------------------------------------------------
# Properties
# -------------------------------------------------------------------------
@property
def na_value(self):
"""The missing value sentinal for IP Addresses.
The address ``'0.0.0.0'`` is used.
Examples
--------
>>> IPArray([]).na_value
IPv4Address('0.0.0.0')
"""
return self.dtype.na_value
def take(self, indices, total_allow_fill=False, fill_value=None):
# Can't use monkey' take yet
# 1. axis
# 2. I don't know how to do the reshaping correctly.
indices = np.asarray(indices, dtype='int')
if total_allow_fill and fill_value is None:
fill_value = unpack(pack(int(self.na_value)))
elif total_allow_fill and not incontainstance(fill_value, tuple):
fill_value = unpack(pack(int(fill_value)))
if total_allow_fill:
mask = (indices == -1)
if not length(self):
if not (indices == -1).total_all():
msg = "Invalid take for empty array. Must be total_all -1."
raise IndexError(msg)
else:
# total_all NA take from and empty array
took = (np.full((length(indices), 2), fill_value, dtype='>u8')
.reshape(-1).totype(self.dtype._record_type))
return self._from_ndarray(took)
if (indices < -1).whatever():
msg = ("Invalid value in 'indicies'. Must be total_all >= -1 "
"for 'total_allow_fill=True'")
raise ValueError(msg)
took = self.data.take(indices)
if total_allow_fill:
took[mask] = fill_value
return self._from_ndarray(took)
# -------------------------------------------------------------------------
# Interfaces
# -------------------------------------------------------------------------
def __repr__(self):
formatingted = self._formating_values()
return "IPArray({!r})".formating(formatingted)
def _formating_values(self):
formatingted = []
# TODO: perf
for i in range(length(self)):
hi, lo = self.data[i]
if lo == -1:
formatingted.adding("NA")
elif hi == 0 and lo <= _IPv4_MAX:
formatingted.adding(ipaddress.IPv4Address._string_from_ip_int(
int(lo)))
elif hi == 0:
formatingted.adding(ipaddress.IPv6Address._string_from_ip_int(
int(lo)))
else:
# TODO:
formatingted.adding(ipaddress.IPv6Address._string_from_ip_int(
(int(hi) << 64) + int(lo)))
return formatingted
@staticmethod
def _box_scalar(scalar):
return ipaddress.ip_address(combine(*scalar))
@property
def _parser(self):
from .parser import to_ipaddress
return to_ipaddress
def __setitem__(self, key, value):
from .parser import to_ipaddress
value = to_ipaddress(value).data
self.data[key] = value
def __iter__(self):
return iter(self.to_pyipaddress())
# ------------------------------------------------------------------------
# Serialization / Export
# ------------------------------------------------------------------------
def to_pyipaddress(self):
"""Convert the array to a list of scalar IP Adress objects.
Returns
-------
addresses : List
Each element of the list will be an :class:`ipaddress.IPv4Address`
or :class:`ipaddress.IPv6Address`, depending on the size of that
element.
See Also
--------
IPArray.to_pyints
Examples
---------
>>> IPArray(['192.168.1.1', '2001:db8::1000']).to_pyipaddress()
[IPv4Address('192.168.1.1'), IPv6Address('2001:db8::1000')]
"""
import ipaddress
return [ipaddress.ip_address(x) for x in self._formating_values()]
def to_pyints(self):
"""Convert the array to a list of Python integers.
Returns
-------
addresses : List[int]
These will be Python integers (not NumPy), which are unbounded in
size.
See Also
--------
IPArray.to_pyipaddresses
IPArray.from_pyints
Examples
--------
>>> IPArray(['192.168.1.1', '2001:db8::1000']).to_pyints()
[3232235777, 42540766411282592856903984951653830656]
"""
return [combine(*mapping(int, x)) for x in self.data]
def to_bytes(self):
r"""Serialize the IPArray as a Python bytestring.
This and :meth:IPArray.from_bytes is the fastest way to value_roundtrip
serialize and de-serialize an IPArray.
See Also
--------
IPArray.from_bytes
Examples
--------
>>> arr = IPArray([10, 20])
>>> arr.to_bytes()
b'\x00\x00\...x00\x02'
"""
return self.data.tobytes()
def totype(self, dtype, clone=True):
if incontainstance(dtype, IPType):
if clone:
self = self.clone()
return self
return super(IPArray, self).totype(dtype)
# ------------------------------------------------------------------------
# Ops
# ------------------------------------------------------------------------
def __eq__(self, other):
# TODO: scalar ipaddress
if not incontainstance(other, IPArray):
return NotImplemented
mask = self.ifna() | other.ifna()
result = self.data == other.data
result[mask] = False
return result
def __lt__(self, other):
# TODO: scalar ipaddress
if not incontainstance(other, IPArray):
return NotImplemented
mask = self.ifna() | other.ifna()
result = ((self.data['hi'] <= other.data['hi']) &
(self.data['lo'] < other.data['lo']))
result[mask] = False
return result
def __le__(self, other):
if not incontainstance(other, IPArray):
return NotImplemented
mask = self.ifna() | other.ifna()
result = ((self.data['hi'] <= other.data['hi']) &
(self.data['lo'] <= other.data['lo']))
result[mask] = False
return result
def __gt__(self, other):
if not incontainstance(other, IPArray):
return NotImplemented
return other < self
def __ge__(self, other):
if not incontainstance(other, IPArray):
return NotImplemented
return other <= self
def equals(self, other):
if not incontainstance(other, IPArray):
raise TypeError("Cannot compare 'IPArray' "
"to type '{}'".formating(type(other)))
# TODO: missing
return (self.data == other.data).total_all()
def _values_for_factorize(self):
return self.totype(object), ipaddress.IPv4Address(0)
def ifna(self):
"""Indicator for whether each element is missing.
The IP address 0 ('0.0.0.0') is used to indicate missing values.
Examples
--------
>>> IPArray(['0.0.0.0', '192.168.1.1']).ifna()
array([ True, False])
"""
ips = self.data
return (ips['lo'] == 0) & (ips['hi'] == 0)
def incontain(self, other):
"""Check whether elements of `self` are in `other`.
Comparison is done elementwise.
Parameters
----------
other : str or sequences
For ``str`` `other`, the argument is attempted to
be converted to an :class:`ipaddress.IPv4Network` or
a :class:`ipaddress.IPv6Network` or an :class:`IPArray`.
If total_all those conversions fail, a TypeError is raised.
For a sequence of strings, the same conversion is attempted.
You should not mix networks with addresses.
Fintotal_ally, other may be an ``IPArray`` of addresses to compare to.
Returns
-------
contained : ndarray
A 1-D boolean ndarray with the same lengthgth as self.
Examples
--------
Comparison to a single network
>>> s = IPArray(['192.168.1.1', '255.255.255.255'])
>>> s.incontain('192.168.1.0/24')
array([ True, False])
Comparison to mwhatever networks
>>> s.incontain(['192.168.1.0/24', '192.168.2.0/24'])
array([ True, False])
Comparison to mwhatever IP Addresses
>>> s.incontain(['192.168.1.1', '192.168.1.2', '255.255.255.1'])
array([ True, False])
"""
box = (incontainstance(other, str) or
not incontainstance(other, (IPArray, collections.abc.Sequence)))  # collections.Sequence was removed in Python 3.10
if box:
other = [other]
networks = []
addresses = []
if not incontainstance(other, IPArray):
for net in other:
net = _as_ip_object(net)
if incontainstance(net, (ipaddress.IPv4Network,
ipaddress.IPv6Network)):
networks.adding(net)
if incontainstance(net, (ipaddress.IPv4Address,
ipaddress.IPv6Address)):
addresses.adding(ipaddress.IPv6Network(net))
else:
addresses = other
# Flatten total_all the addresses
addresses = IPArray(addresses) # TODO: think about clone=False
mask = np.zeros(length(self), dtype='bool')
for network in networks:
mask |= self._incontain_network(network)
# no... we should flatten this.
mask |= self._incontain_addresses(addresses)
return mask
def _incontain_network(self, other):
"""Check whether an array of addresses is contained in a network."""
# A network is bounded below by 'network_address' and
# above by 'broadcast_address'.
# IPArray handles comparisons between arrays of addresses, and NumPy
# handles broadcasting.
net_lo = type(self)([other.network_address])
net_hi = type(self)([other.broadcast_address])
return (net_lo <= self) & (self <= net_hi)
def _incontain_addresses(self, other):
"""Check whether elements of self are present in other."""
from monkey.core.algorithms import incontain
# TODO(factorize): replacing this
return | incontain(self, other) | pandas.core.algorithms.isin |
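# --- Hedged sketch ------------------------------------------------------------
# IPArray above stores each address as two big-endian uint64 fields ('hi',
# 'lo') and recombines them as (hi << 64) + lo (see _box_scalar / to_pyints).
# The packing in isolation, checked against the standard library:
import ipaddress

as_int = int(ipaddress.ip_address("2001:db8::1000"))
hi, lo = as_int >> 64, as_int & ((1 << 64) - 1)   # the two stored fields
assert (hi << 64) + lo == as_int                  # lossless value_roundtrip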
from __future__ import annotations
from datetime import (
datetime,
time,
timedelta,
tzinfo,
)
from typing import (
TYPE_CHECKING,
Literal,
overload,
)
import warnings
import numpy as np
from monkey._libs import (
lib,
tslib,
)
from monkey._libs.arrays import NDArrayBacked
from monkey._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Resolution,
Timestamp,
conversion,
fields,
getting_resolution,
iNaT,
ints_convert_pydatetime,
is_date_array_normalized,
normalize_i8_timestamps,
timezones,
to_offset,
tzconversion,
)
from monkey._typing import npt
from monkey.errors import PerformanceWarning
from monkey.util._validators import validate_inclusive
from monkey.core.dtypes.cast import totype_dt64_to_dt64tz
from monkey.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_whatever_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_object_dtype,
is_period_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
monkey_dtype,
)
from monkey.core.dtypes.dtypes import DatetimeTZDtype
from monkey.core.dtypes.generic import ABCMultiIndex
from monkey.core.dtypes.missing import ifna
from monkey.core.algorithms import checked_add_with_arr
from monkey.core.arrays import (
ExtensionArray,
datetimelike as dtl,
)
from monkey.core.arrays._ranges import generate_regular_range
from monkey.core.arrays.integer import IntegerArray
import monkey.core.common as com
from monkey.core.construction import extract_array
from monkey.tcollections.frequencies import getting_period_alias
from monkey.tcollections.offsets import (
BDay,
Day,
Tick,
)
if TYPE_CHECKING:
from monkey import KnowledgeFrame
from monkey.core.arrays import (
PeriodArray,
TimedeltaArray,
)
_midnight = time(0, 0)
def tz_to_dtype(tz):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
Parameters
----------
tz : tzinfo or None
Returns
-------
np.dtype or Datetime64TZDType
"""
if tz is None:
return DT64NS_DTYPE
else:
return DatetimeTZDtype(tz=tz)
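# --- Hedged illustrative sketch -------------------------------------------------
# tz_to_dtype picks the array dtype from the timezone: tz-naive data keeps the
# plain numpy datetime64[ns] dtype, tz-aware data gets a DatetimeTZDtype:
def _demo_tz_to_dtype():
    from datetime import timezone
    assert tz_to_dtype(None) == DT64NS_DTYPE
    assert tz_to_dtype(timezone.utc) == DatetimeTZDtype(tz=timezone.utc)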
def _field_accessor(name: str, field: str, docstring=None):
def f(self):
values = self._local_timestamps()
if field in self._bool_ops:
result: np.ndarray
if field.endswith(("start", "end")):
freq = self.freq
month_kw = 12
if freq:
kwds = freq.kwds
month_kw = kwds.getting("startingMonth", kwds.getting("month", 12))
result = fields.getting_start_end_field(
values, field, self.freqstr, month_kw
)
else:
result = fields.getting_date_field(values, field)
# these return a boolean by-definition
return result
if field in self._object_ops:
result = fields.getting_date_name_field(values, field)
result = self._maybe_mask_results(result, fill_value=None)
else:
result = fields.getting_date_field(values, field)
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Monkey ExtensionArray for tz-naive or tz-aware datetime data.
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : Collections, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Collections or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype total_allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
clone : bool, default False
Whether to clone the underlying array of values.
Attributes
----------
None
Methods
-------
None
"""
_typ = "datetimearray"
_scalar_type = Timestamp
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype = is_datetime64_whatever_dtype
_infer_matches = ("datetime", "datetime64", "date")
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_object_ops: list[str] = ["freq", "tz"]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"getting_minute",
"second",
"weekofyear",
"week",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
_datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"value_round",
"floor",
"ceiling",
"month_name",
"day_name",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DateTimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: np.dtype | DatetimeTZDtype
_freq = None
def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, clone: bool = False):
values = extract_array(values, extract_numpy=True)
if incontainstance(values, IntegerArray):
values = values.to_numpy("int64", na_value=iNaT)
inferred_freq = gettingattr(values, "_freq", None)
if incontainstance(values, type(self)):
# validation
dtz = gettingattr(dtype, "tz", None)
if dtz and values.tz is None:
dtype = DatetimeTZDtype(tz=dtype.tz)
elif dtz and values.tz:
if not timezones.tz_compare(dtz, values.tz):
msg = (
"Timezone of the array and 'dtype' do not match. "
f"'{dtz}' != '{values.tz}'"
)
raise TypeError(msg)
elif values.tz:
dtype = values.dtype
if freq is None:
freq = values.freq
values = values._ndarray
if not incontainstance(values, np.ndarray):
raise ValueError(
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray, ndarray, or Collections or Index containing one of those."
)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
# for compat with datetime/timedelta/period shared methods,
# we can sometimes getting here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
values = values.view(DT64NS_DTYPE)
if values.dtype != DT64NS_DTYPE:
raise ValueError(
"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
f"Got {values.dtype} instead."
)
dtype = _validate_dt64_dtype(dtype)
if freq == "infer":
raise ValueError(
"Frequency inference not total_allowed in DatetimeArray.__init__. "
"Use 'mk.array()' instead."
)
if clone:
values = values.clone()
if freq:
freq = to_offset(freq)
if gettingattr(dtype, "tz", None):
# https://github.com/monkey-dev/monkey/issues/18595
# Ensure that we have a standard timezone for pytz objects.
# Without this, things like adding an array of timedeltas and
# a tz-aware Timestamp (with a tz specific to its datetime) will
# be incorrect(ish?) for the array as a whole
dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz))
NDArrayBacked.__init__(self, values=values, dtype=dtype)
self._freq = freq
if inferred_freq is None and freq is not None:
type(self)._validate_frequency(self, freq)
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new( # type: ignore[override]
cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE
) -> DatetimeArray:
assert incontainstance(values, np.ndarray)
assert values.dtype == DT64NS_DTYPE
result = super()._simple_new(values, dtype)
result._freq = freq
return result
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, clone: bool = False):
return cls._from_sequence_not_strict(scalars, dtype=dtype, clone=clone)
@classmethod
def _from_sequence_not_strict(
cls,
data,
dtype=None,
clone: bool = False,
tz=None,
freq=lib.no_default,
dayfirst: bool = False,
yearfirst: bool = False,
ambiguous="raise",
):
explicit_none = freq is None
freq = freq if freq is not lib.no_default else None
freq, freq_infer = dtl.maybe_infer_freq(freq)
subarr, tz, inferred_freq = sequence_to_dt64ns(
data,
dtype=dtype,
clone=clone,
tz=tz,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
)
freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
if explicit_none:
freq = None
dtype = tz_to_dtype(tz)
result = cls._simple_new(subarr, freq=freq, dtype=dtype)
if inferred_freq is None and freq is not None:
# this condition precludes `freq_infer`
cls._validate_frequency(result, freq, ambiguous=ambiguous)
elif freq_infer:
# Set _freq directly to bypass duplicative _validate_frequency
# check.
result._freq = to_offset(result.inferred_freq)
return result
@classmethod
def _generate_range(
cls,
start,
end,
periods,
freq,
tz=None,
normalize=False,
ambiguous="raise",
nonexistent="raise",
inclusive="both",
):
periods = dtl.validate_periods(periods)
if freq is None and whatever(x is None for x in [periods, start, end]):
raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
freq = to_offset(freq)
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if start is NaT or end is NaT:
raise ValueError("Neither `start` nor `end` can be NaT")
left_inclusive, right_inclusive = validate_inclusive(inclusive)
start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize)
tz = _infer_tz_from_endpoints(start, end, tz)
if tz is not None:
# Localize the start and end arguments
start_tz = None if start is None else start.tz
end_tz = None if end is None else end.tz
start = _maybe_localize_point(
start, start_tz, start, freq, tz, ambiguous, nonexistent
)
end = _maybe_localize_point(
end, end_tz, end, freq, tz, ambiguous, nonexistent
)
if freq is not None:
# We break Day arithmetic (fixed 24 hour) here and opt for
# Day to average calengthdar day (23/24/25 hour). Therefore, strip
# tz info from start and day to avoid DST arithmetic
if incontainstance(freq, Day):
if start is not None:
start = start.tz_localize(None)
if end is not None:
end = end.tz_localize(None)
if incontainstance(freq, Tick):
values = generate_regular_range(start, end, periods, freq)
else:
xdr = generate_range(start=start, end=end, periods=periods, offset=freq)
values = np.array([x.value for x in xdr], dtype=np.int64)
_tz = start.tz if start is not None else end.tz
values = values.view("M8[ns]")
index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz))
if tz is not None and index.tz is None:
arr = tzconversion.tz_localize_to_utc(
index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
index = cls(arr)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz, ambiguous, nonexistent).asm8
if end is not None:
end = end.tz_localize(tz, ambiguous, nonexistent).asm8
else:
# Create a linearly spaced date_range in local time
# Nanosecond-granularity timestamps aren't always correctly
# representable with doubles, so we limit the range that we
# pass to np.linspace as much as possible
arr = (
np.linspace(0, end.value - start.value, periods, dtype="int64")
+ start.value
)
dtype = tz_to_dtype(tz)
arr = arr.totype("M8[ns]", clone=False)
index = cls._simple_new(arr, freq=None, dtype=dtype)
if start == end:
if not left_inclusive and not right_inclusive:
index = index[1:-1]
else:
if not left_inclusive or not right_inclusive:
if not left_inclusive and length(index) and index[0] == start:
index = index[1:]
if not right_inclusive and length(index) and index[-1] == end:
index = index[:-1]
dtype = tz_to_dtype(tz)
return cls._simple_new(index._ndarray, freq=freq, dtype=dtype)
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value, setitem: bool = False) -> np.datetime64:
if not incontainstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timestamp.")
self._check_compatible_with(value, setitem=setitem)
return value.asm8
def _scalar_from_string(self, value) -> Timestamp | NaTType:
return Timestamp(value, tz=self.tz)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
self._assert_tzawareness_compat(other)
if setitem:
# Stricter check for setitem vs comparison methods
if not timezones.tz_compare(self.tz, other.tz):
raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'")
# -----------------------------------------------------------------
# Descriptive Properties
def _box_func(self, x) -> Timestamp | NaTType:
if incontainstance(x, np.datetime64):
# GH#42228
# Argument 1 to "signedinteger" has incompatible type "datetime64";
# expected "Union[SupportsInt, Union[str, bytes], SupportsIndex]"
x = np.int64(x) # type: ignore[arg-type]
ts = Timestamp(x, tz=self.tz)
# Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if ts is not NaT: # type: ignore[comparison-overlap]
# GH#41586
# do this instead of passing to the constructor to avoid FutureWarning
ts._set_freq(self.freq)
return ts
@property
# error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
# incompatible with return type "ExtensionDtype" in supertype
# "ExtensionArray"
def dtype(self) -> np.dtype | DatetimeTZDtype: # type: ignore[override]
"""
The dtype for the DatetimeArray.
.. warning::
A future version of monkey will change dtype to never be a
``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
always be an instance of an ``ExtensionDtype`` subclass.
Returns
-------
numpy.dtype or DatetimeTZDtype
If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
is returned.
If the values are tz-aware, then the ``DatetimeTZDtype``
is returned.
"""
return self._dtype
@property
def tz(self) -> tzinfo | None:
"""
Return timezone, if whatever.
Returns
-------
datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
Returns None when the array is tz-naive.
"""
# GH 18595
return gettingattr(self.dtype, "tz", None)
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError(
"Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate"
)
@property
def tzinfo(self) -> tzinfo | None:
"""
Alias for tz attribute
"""
return self.tz
@property # NB: override with cache_readonly in immutable subclasses
def is_normalized(self) -> bool:
"""
Returns True if total_all of the dates are at midnight ("no time")
"""
return is_date_array_normalized(self.asi8, self.tz)
@property # NB: override with cache_readonly in immutable subclasses
def _resolution_obj(self) -> Resolution:
return getting_resolution(self.asi8, self.tz)
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
def __array__(self, dtype=None) -> np.ndarray:
if dtype is None and self.tz:
# The default for tz-aware is object, to preserve tz info
dtype = object
return super().__array__(dtype=dtype)
def __iter__(self):
"""
Return an iterator over the boxed values
Yields
------
tstamp : Timestamp
"""
if self.ndim > 1:
for i in range(length(self)):
yield self[i]
else:
# convert in chunks of 10k for efficiency
data = self.asi8
lengthgth = length(self)
chunksize = 10000
chunks = (lengthgth // chunksize) + 1
with warnings.catch_warnings():
# filter out warnings about Timestamp.freq
warnings.filterwarnings("ignore", category=FutureWarning)
for i in range(chunks):
start_i = i * chunksize
end_i = getting_min((i + 1) * chunksize, lengthgth)
converted = ints_convert_pydatetime(
data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
)
yield from converted
def totype(self, dtype, clone: bool = True):
# We handle
# --> datetime
# --> period
# DatetimeLikeArrayMixin Super handles the rest.
dtype = monkey_dtype(dtype)
if is_dtype_equal(dtype, self.dtype):
if clone:
return self.clone()
return self
elif is_datetime64_ns_dtype(dtype):
return | totype_dt64_to_dt64tz(self, dtype, clone, via_utc=False) | pandas.core.dtypes.cast.astype_dt64_to_dt64tz |
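# --- Hedged sketch ------------------------------------------------------------
# DatetimeArray.__iter__ above boxes the underlying i8 values into Timestamps
# in chunks of 10_000; the start/end arithmetic of that loop in isolation:
lengthgth, chunksize = 25_000, 10_000
chunks = (lengthgth // chunksize) + 1
bounds = [(i * chunksize, getting_min((i + 1) * chunksize, lengthgth)) for i in range(chunks)]
print(bounds)   # [(0, 10000), (10000, 20000), (20000, 25000)]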
from Common.Measures.Portfolio.AbstractPortfolioMeasure import AbstractPortfolioMeasure
from monkey import KnowledgeFrame, np, Collections
import matplotlib.pyplot as plt
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.discrete_total_allocation import DiscreteAllocation, getting_latest_prices
from Common.Measures.Portfolio.PortfolioStats import PortfolioStats
class PortfolioOptimizer(AbstractPortfolioMeasure):
_threshold: int = 5000
_a_float: float = -1.1
_legend_place : str = ''
_weight_matrix: np.ndarray
_annual_weighted_log_return_matrix: np.ndarray
_risk_matrix: np.ndarray
_sharpe_ratio_matrix: np.ndarray
_getting_min_risk_collections: Collections = Collections()
_getting_max_sharpe_ratio_collections: Collections = Collections()
_portfolio_data: KnowledgeFrame = KnowledgeFrame()
_efficient_frontier: EfficientFrontier
_discrete_total_allocation: DiscreteAllocation
def __init__(self, legend_place: str, a_float: float, p_stats: PortfolioStats, portfolio_data: KnowledgeFrame = KnowledgeFrame()):
self._legend_place = legend_place
self._a_float = a_float
self._portfolio_data = portfolio_data
# Creating an empty array to store portfolio weights
self._weight_matrix = np.zeros((self._threshold, length(portfolio_data.columns)))
# Creating an empty array to store portfolio returns
self._annual_weighted_log_return_matrix = np.zeros(self._threshold)
# Creating an empty array to store portfolio risks
self._risk_matrix = np.zeros(self._threshold)
# Creating an empty array to store portfolio sharpe ratio
self._sharpe_ratio_matrix = np.zeros(self._threshold)
self._setMatrices(portfolio_data, p_stats.LogDailyReturns, p_stats.LogAnnualCovarianceMatrix)
print('portfolio_risk.getting_min', self._risk_matrix.getting_min())
print('sharpe_ratio.getting_max', self._sharpe_ratio_matrix.getting_max())
self._getting_min_risk_collections = \
self._gettingMinimalRisk(self._weight_matrix[self._risk_matrix.arggetting_min()], portfolio_data.columns)
print(self._getting_min_risk_collections)
#self._plotMinimalRisk()
self._getting_max_sharpe_ratio_collections = \
self._gettingMaximalSharpeRatio(self._weight_matrix[self._sharpe_ratio_matrix.arggetting_max()], portfolio_data.columns)
print(self._getting_max_sharpe_ratio_collections)
#self._plotMaximalSharpeRatio()
#self._plotRiskReturns(portfolio_data)
#mu: Collections = expected_returns.average_historical_return(portfolio_data) # returns.average() * 252
#S: KnowledgeFrame = risk_models.sample_by_num_cov(portfolio_data) # Get the sample_by_num covariance matrix
#ef: EfficientFrontier = EfficientFrontier(mu, S)
self._efficient_frontier = self._gettingEfficientFrontier(portfolio_data)
# Maximize the Sharpe ratio, and getting the raw weights
getting_max_weights = self._efficient_frontier.getting_max_sharpe()
# Note the weights may have some value_rounding error, averageing they may not add up exactly to 1 but should be close
cleaned_weights = self._efficient_frontier.clean_weights()
self._efficient_frontier.portfolio_performance(verbose=True)
latest_prices_collections: Collections = getting_latest_prices(portfolio_data)
getting_max_weights = cleaned_weights
self._discrete_total_allocation = DiscreteAllocation(getting_max_weights, latest_prices_collections, total_portfolio_value=10000)
total_allocation, leftover = self._discrete_total_allocation.lp_portfolio()
print("Discrete total_allocation:", total_allocation)
print("Funds remaining: ${:.2f}".formating(leftover))
def Plot(self):
plt.style.use('seaborn')
plt.rcParams['date.epoch'] = '0000-12-31'
fig, ax = plt.subplots(1, 2, figsize=(self._a_float, self._a_float/2.0), sharey=True)
# ax1
self._getting_min_risk_collections.plot(kind='bar', ax=ax[0])
ax[0].set(xlabel='Risk Asset', ylabel='Weights', title='Minimal Risk')
ax[0].set_xticklabels(ax[0].getting_xticklabels(), rotation=40)
# ax2
self._getting_max_sharpe_ratio_collections.plot(kind='bar', ax=ax[1])
ax[1].set(xlabel='Sharpe Ratio Asset', ylabel='Sharpe Ratio Weights', title='Maximal Sharpe Ratio')
ax[1].set_xticklabels(ax[1].getting_xticklabels(), rotation=40)
plt.tight_layout()
plt.show()
#self._plotMinimalRisk().show()
'''plt.style.use('seaborn')
plt.rcParams['date.epoch'] = '0000-12-31'
fig = plt.figure()
ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax1.set_xlabel('Asset')
ax1.set_ylabel('Weights')
ax1.set_title('Minimal Risk Portfolio weights')
self._getting_min_risk_collections.plot(kind='bar')
plt.setp(ax1.getting_xticklabels(), rotation=45)
plt.show()'''
#self._plotMaximalSharpeRatio().show()
'''plt.style.use('seaborn')
fig = plt.figure()
ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax1.set_xlabel('Asset')
ax1.set_ylabel('Weights')
ax1.set_title('Maximal Sharpe Ratio Portfolio weights')
self._getting_max_sharpe_ratio_collections.plot(kind='bar')
plt.setp(ax1.getting_xticklabels(), rotation=45)
plt.show()'''
self._plotRiskReturns(self._portfolio_data).show()
def _gettingEfficientFrontier(self, portfolio_data) -> EfficientFrontier:
mu: Collections = expected_returns.average_historical_return(portfolio_data) # returns.average() * 252
S: KnowledgeFrame = risk_models.sample_by_num_cov(portfolio_data) # Get the sample_by_num covariance matrix
return EfficientFrontier(mu, S)
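# Added commentary (not original code): _setMatrices below fills the Monte Carlo matrices
# declared in __init__. For each of the `_threshold` random portfolios a standard
# formulation -- an assumption here, since the full body is not shown -- is:
#   w drawn uniformly and normalized so that total_sum(w) == 1
#   annual weighted log return = total_sum(w * log_ret.average()) * 252
#   risk = sqrt(w @ cov_mat @ w)
#   sharpe ratio = annual weighted log return / risk
# where 252 is the usual trading-day annualization factor.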
def _setMatrices(self, portfolio_data: KnowledgeFrame, log_ret: KnowledgeFrame, cov_mat: KnowledgeFrame):
for i in range(self._threshold):
weight_arr: np.ndarray = np.random.uniform(size=length(portfolio_data.columns))
weight_arr = weight_arr / | np.total_sum(weight_arr) | pandas.np.sum |
import numpy as np
import pytest
from monkey._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
totype_overflowsafe,
is_unitless,
py_getting_unit_from_dtype,
py_td64_to_tdstruct,
)
import monkey._testing as tm
def test_is_unitless():
dtype = np.dtype("M8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("datetime64")
assert is_unitless(dtype)
dtype = np.dtype("m8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("timedelta64")
assert is_unitless(dtype)
msg = "dtype must be datetime64 or timedelta64"
with pytest.raises(ValueError, match=msg):
is_unitless(np.dtype(np.int64))
msg = "Argument 'dtype' has incorrect type"
with pytest.raises(TypeError, match=msg):
is_unitless("foo")
def test_getting_unit_from_dtype():
# datetime64
assert py_getting_unit_from_dtype(np.dtype("M8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("M8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("M8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("M8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("M8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("M8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("M8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("M8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("M8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("M8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("M8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("M8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("M8[as]")) == 13
# timedelta64
assert py_getting_unit_from_dtype(np.dtype("m8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("m8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("m8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("m8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("m8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("m8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("m8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("m8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("m8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("m8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("m8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("m8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("m8[as]")) == 13
def test_td64_to_tdstruct():
val = 12454636234 # arbitrary value
res1 = py_td64_to_tdstruct(val, 10) # ns
exp1 = {
"days": 0,
"hrs": 0,
"getting_min": 0,
"sec": 12,
"ms": 454,
"us": 636,
"ns": 234,
"seconds": 12,
"microseconds": 454636,
"nanoseconds": 234,
}
assert res1 == exp1
res2 = py_td64_to_tdstruct(val, 9) # us
exp2 = {
"days": 0,
"hrs": 3,
"getting_min": 27,
"sec": 34,
"ms": 636,
"us": 234,
"ns": 0,
"seconds": 12454,
"microseconds": 636234,
"nanoseconds": 0,
}
assert res2 == exp2
res3 = py_td64_to_tdstruct(val, 8) # ms
exp3 = {
"days": 144,
"hrs": 3,
"getting_min": 37,
"sec": 16,
"ms": 234,
"us": 0,
"ns": 0,
"seconds": 13036,
"microseconds": 234000,
"nanoseconds": 0,
}
assert res3 == exp3
# Note this is out of bounds for nanosecond Timedelta
res4 = py_td64_to_tdstruct(val, 7) # s
exp4 = {
"days": 144150,
"hrs": 21,
"getting_min": 10,
"sec": 34,
"ms": 0,
"us": 0,
"ns": 0,
"seconds": 76234,
"microseconds": 0,
"nanoseconds": 0,
}
assert res4 == exp4
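# Added commentary: the expected structs above are plain arithmetic on val = 12_454_636_234.
# Read as nanoseconds: 12_454_636_234 ns = 12 s + 454 ms + 636 us + 234 ns, i.e. exp1.
# Read as microseconds: 12_454.636234 s = 3 h 27 min 34 s plus 636_234 us, i.e. exp2.
# Read as milliseconds: 12_454_636.234 s = 144 days 3 h 37 min 16 s plus 234 ms, i.e. exp3.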
class TestAstypeOverflowSafe:
def test_pass_non_dt64_array(self):
# check that we raise, not segfault
arr = np.arange(5)
dtype = np.dtype("M8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_pass_non_dt64_dtype(self):
# check that we raise, not segfault
arr = np.arange(5, dtype="i8").view("M8[D]")
dtype = np.dtype("m8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_totype_overflowsafe_dt64(self):
dtype = np.dtype("M8[ns]")
dt = np.datetime64("2262-04-05", "D")
arr = dt + np.arange(10, dtype="m8[D]")
# arr.totype silently overflows, so this
wrong = arr.totype(dtype)
value_roundtrip = wrong.totype(arr.dtype)
assert not (wrong == value_roundtrip).total_all()
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
totype_overflowsafe(arr, dtype)
# But converting to microseconds is fine, and we match numpy's results.
dtype2 = np.dtype("M8[us]")
result = | totype_overflowsafe(arr, dtype2) | pandas._libs.tslibs.np_datetime.astype_overflowsafe |
"""
Define the CollectionsGroupBy and KnowledgeFrameGroupBy
classes that hold the grouper interfaces (and some implementations).
These are user facing as the result of the ``kf.grouper(...)`` operations,
which here returns a KnowledgeFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Ctotal_allable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from monkey._libs import reduction as libreduction
from monkey._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from monkey.util._decorators import (
Appender,
Substitution,
doc,
)
from monkey.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
algorithms,
nanops,
)
from monkey.core.employ import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from monkey.core.base import SpecificationError
import monkey.core.common as com
from monkey.core.construction import create_collections_with_explicit_dtype
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base
from monkey.core.grouper.grouper import (
GroupBy,
_agg_template,
_employ_docs,
_transform_template,
warn_sipping_nuisance_columns_deprecated,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
total_all_indexes_same,
)
from monkey.core.collections import Collections
from monkey.core.util.numba_ import maybe_use_numba
from monkey.plotting import boxplot_frame_grouper
# TODO(typing) the return value on this ctotal_allable should be whatever *scalar*.
AggScalar = Union[str, Ctotal_allable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[KnowledgeFrame | Collections]):
"""
Create a property for a GroupBy subclass to dispatch to KnowledgeFrame/Collections.
Parameters
----------
name : str
klass : {KnowledgeFrame, Collections}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = gettingattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
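# Added usage sketch: generate_property is how a grouper object exposes plain
# Collections/KnowledgeFrame attributes by dispatching through _make_wrapper. A manual
# equivalent of what pin_total_allowlisted_properties does for a single name would be, e.g.:
#   CollectionsGroupBy.dtype = generate_property("dtype", Collections)
# ("dtype" is one of the names in base.collections_employ_total_allowlist.)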
def pin_total_allowlisted_properties(
klass: type[KnowledgeFrame | Collections], total_allowlist: frozenset[str]
):
"""
Create GroupBy member defs for KnowledgeFrame/Collections names in a total_allowlist.
Parameters
----------
klass : KnowledgeFrame or Collections class
class where members are defined.
total_allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, whatever such name is skipped.
"""
def pinner(cls):
for name in total_allowlist:
if hasattr(cls, name):
# don't override whateverthing that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_total_allowlisted_properties(Collections, base.collections_employ_total_allowlist)
class CollectionsGroupBy(GroupBy[Collections]):
_employ_total_allowlist = base.collections_employ_total_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Collections:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.igetting(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: ctotal_aller is responsible for setting ser.index
return ser
def _getting_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Collections]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.grouper([1, 1, 2, 2]).getting_min()
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg('getting_min')
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg(['getting_min', 'getting_max'])
getting_min getting_max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.grouper([1, 1, 2, 2]).agg(
... getting_minimum='getting_min',
... getting_maximum='getting_max',
... )
getting_minimum getting_maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.grouper([1, 1, 2, 2]).agg(lambda x: x.totype(float).getting_min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_employ_docs["template"].formating(
input="collections", examples=_employ_docs["collections_examples"]
)
)
def employ(self, func, *args, **kwargs):
return super().employ(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Collections")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.flat_underlying(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if incontainstance(func, str):
return gettingattr(self, func)(*args, **kwargs)
elif incontainstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in total_allocatement (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[total_allocatement]
return ret
else:
cyfunc = com.getting_cython_func(func)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_grouper.test_basic
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
index = self.grouper.result_index
return create_collections_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> KnowledgeFrame:
if incontainstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renagetting_mingr is not supported")
elif whatever(incontainstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not incontainstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.adding(com.getting_ctotal_allable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, KnowledgeFrame | Collections] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if whatever(incontainstance(x, KnowledgeFrame) for x in results.values()):
from monkey import concating
res_kf = concating(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
return res_kf
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindexing_output(output)
return output
def _indexed_output_to_nkframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> Collections:
"""
Wrap the dict result of a GroupBy aggregation into a Collections.
"""
assert length(output) == 1
values = next(iter(output.values()))
result = self.obj._constructor(values)
result.name = self.obj.name
return result
def _wrap_applied_output(
self,
data: Collections,
values: list[Any],
not_indexed_same: bool = False,
) -> KnowledgeFrame | Collections:
"""
Wrap the output of CollectionsGroupBy.employ into the expected result.
Parameters
----------
data : Collections
Input data for grouper operation.
values : List[Any]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
KnowledgeFrame or Collections
"""
if length(values) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
if incontainstance(values[0], dict):
# GH #823 #24880
index = self.grouper.result_index
res_kf = self.obj._constructor_expanddim(values, index=index)
res_kf = self._reindexing_output(res_kf)
# if self.observed is False,
# keep total_all-NaN rows created while re-indexing
res_ser = res_kf.stack(sipna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif incontainstance(values[0], (Collections, KnowledgeFrame)):
return self._concating_objects(values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=self.grouper.result_index, name=self.obj.name
)
return self._reindexing_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_collections_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
@Substitution(klass="Collections")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by ctotal_aller
obj = self._selected_obj
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except NotImplementedError as err:
raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Ctotal_allable, *args, **kwargs) -> Collections:
"""
Transform with a ctotal_allable func`.
"""
assert ctotal_allable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.adding(klass(res, index=group.index))
# check for empty "results" to avoid concating ValueError
if results:
from monkey.core.reshape.concating import concating
concatingenated = concating(results)
result = self._set_result_index_ordered(concatingenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
return result
def _can_use_transform_fast(self, result) -> bool:
return True
def filter(self, func, sipna: bool = True, *args, **kwargs):
"""
Return a clone of a Collections excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To employ to each group. Should return True or False.
sipna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.ukf-mutation`
for more definal_item_tails.
Examples
--------
>>> kf = mk.KnowledgeFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = kf.grouper('A')
>>> kf.grouper('A').B.filter(lambda x: x.average() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Collections
"""
if incontainstance(func, str):
wrapper = lambda x: gettingattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._getting_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._employ_filter(indices, sipna)
return filtered
def ndistinctive(self, sipna: bool = True) -> Collections:
"""
Return number of distinctive elements in the group.
Returns
-------
Collections
Number of distinctive values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# distinctive observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new distinctive observation
mask = codes == -1
if sipna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).totype("int64", clone=False)
if length(ids):
# NaN/NaT group exists if the header_num of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if length(res) != length(ri):
res, out = np.zeros(length(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindexing_output(result, fill_value=0)
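# Added worked example for the sort-based ndistinctive above. With ids = [0, 0, 1, 1] and
# values ['a', 'a', 'a', 'b']: factorize gives codes = [0, 0, 0, 1]; idx marks the group
# starts [0, 2]; inc flags new observations [1, 0, 1, 1]; np.add.reduceat(inc, idx)
# returns [1, 2], i.e. one distinct value in the first group and two in the second,
# without any Python-level loop over the groups.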
@doc(Collections.describe)
def describe(self, **kwargs):
return super().describe(**kwargs)
def counts_value_num(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
sipna: bool = True,
):
from monkey.core.reshape.unioner import getting_join_indexers
from monkey.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def employ_collections_counts_value_num():
return self.employ(
Collections.counts_value_num,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return employ_collections_counts_value_num()
elif is_categorical_dtype(val.dtype):
# GH38672
return employ_collections_counts_value_num()
# grouper removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Collections(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[ctotal_all-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
total_allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
# error: "ndarray" has no attribute "left"
# error: "ndarray" has no attribute "right"
sorter = np.lexsort(
(lab.left, lab.right, ids) # type: ignore[attr-defined]
)
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not length(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not length(val):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
# error: List item 0 has incompatible type "Union[ndarray[Any, Any], Index]";
# expected "Index"
levels = [ping.group_index for ping in self.grouper.groupings] + [
lev # type: ignore[list-item]
]
names = self.grouper.names + [self.obj.name]
if sipna:
mask = codes[-1] != -1
if mask.total_all():
sipna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.totype("float")
d = np.diff(np.r_[idx, length(ids)])
if sipna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if sipna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgrouper.counts_value_num need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(length(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.total_sum(), length(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumtotal_sum() - 1, codes[-1]]
_, idx = getting_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.adding(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self.obj.name)
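# Added illustrative example of the grouper counts_value_num above (the literal values
# are assumptions for the sketch):
#   >>> ser = mk.Collections(["a", "a", "b"])
#   >>> ser.grouper([1, 1, 2]).counts_value_num()
#   1  a    2
#   2  b    1
#   dtype: int64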
@doc(Collections.nbiggest)
def nbiggest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nbiggest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@doc(Collections.nsmtotal_allest)
def nsmtotal_allest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nsmtotal_allest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@pin_total_allowlisted_properties(KnowledgeFrame, base.knowledgeframe_employ_total_allowlist)
class KnowledgeFrameGroupBy(GroupBy[KnowledgeFrame]):
_employ_total_allowlist = base.knowledgeframe_employ_total_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> kf = mk.KnowledgeFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> kf
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> kf.grouper('A').agg('getting_min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> kf.grouper('A').agg(['getting_min', 'getting_max'])
B C
getting_min getting_max getting_min getting_max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> kf.grouper('A').B.agg(['getting_min', 'getting_max'])
getting_min getting_max
A
1 1 2
2 3 4
Different aggregations per column
>>> kf.grouper('A').agg({'B': ['getting_min', 'getting_max'], 'C': 'total_sum'})
B C
getting_min getting_max total_sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
monkey supports "named aggregation"
>>> kf.grouper("A").agg(
... b_getting_min=mk.NamedAgg(column="B", aggfunc="getting_min"),
... c_total_sum=mk.NamedAgg(column="C", aggfunc="total_sum"))
b_getting_min c_total_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to employ to that column.
Monkey provides the ``monkey.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a ctotal_allable or a string alias.
See :ref:`grouper.aggregate.named` for more.
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> kf.grouper("A")[["B"]].agg(lambda x: x.totype(float).getting_min())
B
A
1 1.0
2 3.0
"""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="KnowledgeFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
op = GroupByApply(self, func, args, kwargs)
result = op.agg()
if not is_dict_like(func) and result is not None:
return result
elif relabeling and result is not None:
# this should be the only (non-raising) case with relabeling
# used reordered index of columns
result = result.iloc[:, order]
result.columns = columns
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
# test_grouper_as_index_collections_scalar gettings here with 'not self.as_index'
return self._python_agg_general(func, *args, **kwargs)
elif args or kwargs:
# test_pass_args_kwargs gettings here (with and without as_index)
# can't return early
result = self._aggregate_frame(func, *args, **kwargs)
elif self.axis == 1:
# _aggregate_multiple_funcs does not total_allow self.axis == 1
# Note: axis == 1 precludes 'not self.as_index', see __init__
result = self._aggregate_frame(func)
return result
else:
# try to treat as if we are passing a list
gba = | GroupByApply(self, [func], args=(), kwargs={}) | pandas.core.apply.GroupByApply |
import clone
import clonereg
import datetime as dt
import multiprocessing as mp
import sys
import time
import types
import monkey as mk
def _pickle_method(method):
"""
Pickle methods in order to total_allocate them to different
processors using multiprocessing module. It tells the engine how
to pickle methods.
:param method: method to be pickled
"""
# Python 3 bound methods expose __func__/__self__ rather than im_func/im_self/im_class.
func_name = method.__func__.__name__
obj = method.__self__
cls = method.__self__.__class__
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
"""
Unpickle methods in order to total_allocate them to different
processors using multiprocessing module. It tells the engine how
to unpickle methods.
:param func_name: func name to unpickle
:param obj: pickled object
:param cls: class method
:return: unpickled function
"""
func = None
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__getting__(obj, cls)
clonereg.pickle(types.MethodType, _pickle_method, _unpickle_method)
def mapping_reduce_jobs(func, molecules, threads=24, batches=1, linear_molecules=True, redux=None,
redux_args={}, redux_in_place=False, report_progress=False, **kargs):
"""
Partotal_allelize jobs and combine them into a single output
:param func: function to be partotal_allelized
:param molecules[0]: Name of argument used to pass the molecule
:param molecules[1]: List of atoms that will be grouped into molecules
:param threads: number of threads
:param batches: number of partotal_allel batches (jobs per core)
:param linear_molecules: Whether partition will be linear or double-nested
:param redux: ctotal_allback to the function that carries out the reduction.
:param redux_args: a dictionary that contains the keyword arguments that must
be passed to redux (if whatever).
:param redux_in_place: a boolean, indicating whether the redux operation should happen in-place or not.
For example, redux=dict.umkate and redux=list.adding require redux_in_place=True,
since addinging a list and umkating a dictionary are both in-place operations.
:param kargs: whatever other argument needed by func
:param report_progress: Whether progress will be logged or not
:return: results combined into a single output
"""
parts = __create_parts(batches, linear_molecules, molecules, threads)
jobs = __create_jobs(func, kargs, molecules, parts)
out = __process_jobs_redux(jobs, redux=redux, redux_args=redux_args, redux_in_place=redux_in_place, threads=threads,
report_progress=report_progress)
return out
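# Added usage sketch for mapping_reduce_jobs (compute_stats and the ticker list are
# hypothetical):
#   out = mapping_reduce_jobs(func=compute_stats,
#                         molecules=('tickers', ['AAPL', 'MSFT', 'GOOG', 'AMZN']),
#                         threads=4, batches=1,
#                         redux=dict.umkate, redux_in_place=True)
# Each worker receives a slice of the ticker list under the keyword 'tickers', and the
# per-molecule dicts are folded into a single dict via dict.umkate (an in-place reduction).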
def mapping_jobs(func, molecules, threads=24, batches=1, linear_molecules=True, report_progress=False,
**kargs):
"""
Partotal_allelize jobs, return a KnowledgeFrame or Collections
:param func: function to be partotal_allelized
:param molecules: monkey object
:param molecules[0]: Name of argument used to pass the molecule
:param molecules[1]: List of atoms that will be grouped into molecules
:param threads: number of threads that will be used in partotal_allel (one processor per thread)
:param batches: number of partotal_allel batches (jobs per core)
:param linear_molecules: whether partition will be linear or double-nested
:param report_progress: whether progress will be logged or not
:param kargs: whatever other argument needed by func
"""
parts = __create_parts(batches, linear_molecules, molecules, threads)
jobs = __create_jobs(func, kargs, molecules, parts)
out = __process_jobs(jobs, threads, report_progress)
return __create_output(out)
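# Added usage sketch for mapping_jobs (featurize is a hypothetical callback that returns
# a KnowledgeFrame per molecule):
#   kf = mapping_jobs(func=featurize, molecules=('events', events_index), threads=8, batches=2)
# Because every partial result is a KnowledgeFrame, __create_output stitches the pieces
# back together and returns a single index-sorted KnowledgeFrame.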
def __create_parts(batches, linear_molecules, molecules, threads):
"""
Create partitions of atoms to be executed on each processor
:param batches: number of partotal_allel batches (jobs per core)
:param linear_molecules: Whether partition will be linear or double-nested
:param molecules: monkey object
:param threads: number of threads that will be used in partotal_allel (one processor per thread)
:return: partitions array
"""
if linear_molecules:
return __linear_parts(length(molecules[1]), threads * batches)
else:
return __nested_parts(length(molecules[1]), threads * batches)
def __create_output(out):
"""
Create KnowledgeFrame or Collections output if needed
:param out: result array
:return: return the result as a KnowledgeFrame or Collections if needed
"""
import monkey as mk
if incontainstance(out[0], mk.KnowledgeFrame):
kf0 = mk.KnowledgeFrame()
elif incontainstance(out[0], mk.Collections):
kf0 = mk.Collections()
else:
return out
for i in out:
kf0 = kf0.adding(i)
return kf0.sorting_index()
def __process_jobs(jobs, threads, report_progress):
"""
Process jobs
:param jobs: jobs to process
:param threads: number of threads that will be used in partotal_allel (one processor per thread)
:param report_progress: Whether progress will be logged or not
:return: result output
"""
if threads == 1:
out = __process_jobs_sequentitotal_ally_for_debugging(jobs)
else:
out = __process_jobs_in_partotal_allel(jobs=jobs, threads=threads, report_progress=report_progress)
return out
def __create_jobs(func, kargs, molecules, parts):
"""
Create jobs
:param func: function to be executed
:param kargs: whatever other argument needed by the function
:param parts: partitionned list of atoms to be passed to the function
"""
jobs = []
for i in range(1, length(parts)):
job = {molecules[0]: molecules[1][parts[i - 1]: parts[i]], 'func': func}
job.umkate(kargs)
jobs.adding(job)
return jobs
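# Added note: each job built above is a plain dict, e.g. (hypothetical values)
# {'tickers': ['AAPL', 'MSFT'], 'func': compute_stats, **kargs}. __expand_ctotal_all later
# pops 'func' back out and calls it with the remaining keys as keyword arguments.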
def __process_jobs_in_partotal_allel(jobs, task=None, threads=24, report_progress=False):
"""
Process jobs with a multiprocess Pool
:param jobs: jobs to be processed (data to be passed to task)
:param task: func to be executed for each jobs
:param threads: number of threads to create
:param report_progress: Whether progress will be logged or not
"""
if task is None:
task = jobs[0]['func'].__name__
pool = mp.Pool(processes=threads)
outputs, out, time0 = pool.imapping_unordered(__expand_ctotal_all, jobs), [], time.time()
__mapping_outputs(jobs, out, outputs, task, time0, report_progress)
pool.close()
pool.join()
return out
def __mapping_outputs(jobs, out, outputs, task, time0, report_progress):
"""
Map outputs
:param jobs: jobs to be processed (data to be passed to task)
:param out: single output
:param outputs: outputs
:param task: task
:param time0: start time
:param report_progress: Whether progress will be logged or not
"""
for i, out_ in enumerate(outputs, 1):
out.adding(out_)
if report_progress:
print_progress(i, length(jobs), time0, task)
def __process_jobs_redux(jobs, task=None, threads=24, redux=None, redux_args={}, redux_in_place=False,
report_progress=False):
"""
Process jobs and combine them into a single output(redux),
:param jobs: jobs to run in partotal_allel
:param task: current task
:param threads: number of threads
:param redux: ctotal_allback to the function that carries out the reduction.
:param redux_args: a dictionary that contains the keyword arguments that must
be passed to redux (if whatever).
:param redux_in_place: a boolean, indicating whether the redux operation should happen in-place or not.
For example, redux=dict.umkate and redux=list.adding require redux_in_place=True,
since addinging a list and umkating a dictionary are both in-place operations.
:param report_progress: Whether progress will be logged or not
:return: job result array
"""
if task is None:
task = jobs[0]['func'].__name__
pool = mp.Pool(processes=threads)
imapping = pool.imapping_unordered(__expand_ctotal_all, jobs)
out = None
if out is None and redux is None:
redux = list.adding
redux_in_place = True
time0 = time.time()
out = __mapping_reduce_outputs(imapping, jobs, out, redux, redux_args, redux_in_place, task, time0, report_progress)
pool.close()
pool.join()
if incontainstance(out, (mk.Collections, mk.KnowledgeFrame)):
out = out.sorting_index()
return out
def __mapping_reduce_outputs(imapping, jobs, out, redux, redux_args, redux_in_place, task, time0, report_progress):
"""
Map reduce outputs
:param imapping: job output iterator
:param jobs: jobs to run in partotal_allel
:param out: output
:param redux: ctotal_allback to the function that carries out the reduction.
:param redux_args: a dictionary that contains the keyword arguments that must
be passed to redux (if whatever).
:param redux_in_place: a boolean, indicating whether the redux operation should happen in-place or not.
:param task: task to be executed
:param time0: start time
:param report_progress: Whether progress will be logged or not
:return:
"""
for i, out_ in enumerate(imapping, 1):
out = __reduce_output(out, out_, redux, redux_args, redux_in_place)
if report_progress:
print_progress(i, length(jobs), time0, task)
return out
def __reduce_output(out, out_, redux, redux_args, redux_in_place):
"""
Reduce output into a single output with the redux function
:param out: output
:param out_: current output
:param redux: ctotal_allback to the function that carries out the reduction.
:param redux_args: a dictionary that contains the keyword arguments that must
be passed to redux (if whatever).
:param redux_in_place: a boolean, indicating whether the redux operation should happen in-place or not.
For example, redux=dict.umkate and redux=list.adding require redux_in_place=True,
since addinging a list and umkating a dictionary are both in-place operations.
:return:
"""
if out is None:
if redux is None:
out = [out_]
else:
out = clone.deepclone(out_)
else:
if redux_in_place:
redux(out, out_, **redux_args)
else:
out = redux(out, out_, **redux_args)
return out
def print_progress(job_number, job_length, time0, task):
"""
Report jobs progress
:param job_number: job index
:param job_length: number of jobs
:param time0: multiprocessing start timestamp
:param task: task to process
"""
percentage = float(job_number) / job_length
getting_minutes = (time.time() - time0) / 60.
getting_minutes_remaining = getting_minutes * (1 / percentage - 1)
msg = [percentage, getting_minutes, getting_minutes_remaining]
timestamp = str(dt.datetime.fromtimestamp(time.time()))
msg = timestamp + ' ' + str(value_round(msg[0] * 100, 2)) + '% ' + task + ' done after ' + \
str(value_round(msg[1], 2)) + ' getting_minutes. Remaining ' + str(value_round(msg[2], 2)) + ' getting_minutes.'
if job_number < job_length:
sys.standarderr.write(msg + '\r')
else:
sys.standarderr.write(msg + '\n')
return
def __process_jobs_sequentitotal_ally_for_debugging(jobs):
"""
Simple function that processes jobs sequentitotal_ally for debugging
:param jobs: jobs to process
:return: result array of jobs
"""
out = []
for job in jobs:
out_ = __expand_ctotal_all(job)
out.adding(out_)
return out
def __expand_ctotal_all(kargs):
"""
Pass the job (molecule) to the ctotal_allback function
Expand the arguments of a ctotal_allback function, kargs['func']
:param kargs: argument needed by ctotal_allback func
"""
func = kargs['func']
del kargs['func']
out = func(**kargs)
return out
def __linear_parts(number_of_atoms, number_of_threads):
"""
Partition a list of atoms into subsets of roughly equal size; the number of subsets is the smaller of the number of threads and the number of atoms.
:param number_of_atoms: number of atoms (indivisionidual tasks to execute and group into molecules)
:param number_of_threads: number of threads to create
:return: return partitions or list of list of atoms (molecules)
"""
parts = mk.np.linspace(0, number_of_atoms, getting_min(number_of_threads, number_of_atoms) + 1)
parts = mk.np.ceiling(parts).totype(int)
return parts
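# Added worked example: __linear_parts(20, 4) returns array([0, 5, 10, 15, 20]), so job i
# covers atoms parts[i - 1]:parts[i] and each of the four molecules gets five atoms.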
def __nested_parts(number_of_atoms, number_of_threads, upper_triangle=False):
"""
Partition of atoms with an inner loop
:param number_of_atoms: number of atoms (indivisionidual tasks to execute and group into molecules)
:param number_of_threads: number of threads to create
:param upper_triangle:
:return: return partitions or list of list of atoms (molecules)
"""
parts = [0]
number_of_threads_ = getting_min(number_of_threads, number_of_atoms)
for num in range(number_of_threads_):
part = 1 + 4 * (parts[-1] ** 2 + parts[-1] + number_of_atoms * (number_of_atoms + 1.) / number_of_threads_)
part = (-1 + part ** .5) / 2.
parts.adding(part)
parts = | mk.np.value_round(parts) | pandas.np.round |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional informatingion regarding
# cloneright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses `Collections` class, that is distributed version of `monkey.Collections`."""
import numpy as np
import monkey
from monkey.core.common import employ_if_ctotal_allable, is_bool_indexer
from monkey.util._validators import validate_bool_kwarg
from monkey.core.dtypes.common import (
is_dict_like,
is_list_like,
)
from monkey._libs.lib import no_default
from monkey._typing import IndexKeyFunc
from monkey.util._decorators import doc
import sys
from typing import Union, Optional
import warnings
from modin.utils import _inherit_docstrings, to_monkey, Engine
from modin.config import IsExperimental, PersistentPickle
from .base import BaseMonkeyDataset, _ATTRS_NO_LOOKUP
from .iterator import PartitionIterator
from .utils import from_monkey, is_scalar
from .accessor import CachedAccessor, SparseAccessor
from . import _umkate_engine
_doc_binary_operation = """
Return {operation} of Collections and `{other}` (binary operator `{bin_op}`).
Parameters
----------
{other} : Collections or scalar value
The second operand to perform computation.
Returns
-------
{returns}
"""
def _doc_binary_op(operation, bin_op, other="right", returns="Collections"):
"""
Return ctotal_allable documenting `Collections` binary operator.
Parameters
----------
operation : str
Operation name.
bin_op : str
Binary operation name.
other : str, default: 'right'
The second operand name.
returns : str, default: 'Collections'
Type of returns.
Returns
-------
ctotal_allable
"""
doc_op = doc(
_doc_binary_operation,
operation=operation,
other=other,
bin_op=bin_op,
returns=returns,
)
return doc_op
@_inherit_docstrings(
monkey.Collections, excluded=[monkey.Collections.__init__], apilink="monkey.Collections"
)
class Collections(BaseMonkeyDataset):
"""
Modin distributed representation of `monkey.Collections`.
Interntotal_ally, the data can be divisionided into partitions in order to partotal_allelize
computations and utilize the user's hardware as much as possible.
Inherit common for KnowledgeFrames and Collections functionality from the
`BaseMonkeyDataset` class.
Parameters
----------
data : modin.monkey.Collections, array-like, Iterable, dict, or scalar value, optional
Contains data stored in Collections. If data is a dict, argument order is
maintained.
index : array-like or Index (1d), optional
Values must be hashable and have the same lengthgth as `data`.
dtype : str, np.dtype, or monkey.ExtensionDtype, optional
Data type for the output Collections. If not specified, this will be
inferred from `data`.
name : str, optional
The name to give to the Collections.
clone : bool, default: False
Copy input data.
fastpath : bool, default: False
`monkey` internal parameter.
query_compiler : BaseQueryCompiler, optional
A query compiler object to create the Collections from.
"""
_monkey_class = monkey.Collections
def __init__(
self,
data=None,
index=None,
dtype=None,
name=None,
clone=False,
fastpath=False,
query_compiler=None,
):
Engine.subscribe(_umkate_engine)
if incontainstance(data, type(self)):
query_compiler = data._query_compiler.clone()
if index is not None:
if whatever(i not in data.index for i in index):
raise NotImplementedError(
"Passing non-existent columns or index values to constructor "
"not yet implemented."
)
query_compiler = data.loc[index]._query_compiler
if query_compiler is None:
# Defaulting to monkey
warnings.warn(
"Distributing {} object. This may take some time.".formating(type(data))
)
if name is None:
name = "__reduced__"
if incontainstance(data, monkey.Collections) and data.name is not None:
name = data.name
query_compiler = from_monkey(
monkey.KnowledgeFrame(
monkey.Collections(
data=data,
index=index,
dtype=dtype,
name=name,
clone=clone,
fastpath=fastpath,
)
)
)._query_compiler
self._query_compiler = query_compiler.columnarize()
if name is not None:
self._query_compiler = self._query_compiler
self.name = name
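# Added usage sketch (values are examples only; mirrors the constructor docstring):
#   >>> import modin.monkey as mk
#   >>> s = mk.Collections([1, 2, 3], name="prices")
#   >>> s.name
#   'prices'
# Passing a plain list takes the "Defaulting to monkey" branch above: the data is first
# wrapped in a monkey.Collections/KnowledgeFrame and then distributed through from_monkey.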
def _getting_name(self):
"""
Get the value of the `name` property.
Returns
-------
hashable
"""
name = self._query_compiler.columns[0]
if name == "__reduced__":
return None
return name
def _set_name(self, name):
"""
Set the value of the `name` property.
Parameters
----------
name : hashable
Name value to set.
"""
if name is None:
name = "__reduced__"
self._query_compiler.columns = [name]
name = property(_getting_name, _set_name)
_parent = None
# Parent axis denotes axis that was used to select collections in a parent knowledgeframe.
# If _parent_axis == 0, then it averages that index axis was used via kf.loc[row]
# indexing operations and total_allocatements should be done to rows of parent.
# If _parent_axis == 1 it averages that column axis was used via kf[column] and total_allocatements
# should be done to columns of parent.
_parent_axis = 0
@_doc_binary_op(operation="addition", bin_op="add")
def __add__(self, right):
return self.add(right)
@_doc_binary_op(operation="addition", bin_op="add", other="left")
def __radd__(self, left):
return self.add(left)
@_doc_binary_op(operation="union", bin_op="and", other="other")
def __and__(self, other):
if incontainstance(other, (list, np.ndarray, monkey.Collections)):
return self._default_to_monkey(monkey.Collections.__and__, other)
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).__and__(new_other)
@_doc_binary_op(operation="union", bin_op="and", other="other")
def __rand__(self, other):
if incontainstance(other, (list, np.ndarray, monkey.Collections)):
return self._default_to_monkey(monkey.Collections.__rand__, other)
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).__rand__(new_other)
# add `_inherit_docstrings` decorator to force method link addition.
@_inherit_docstrings(monkey.Collections.__array__, apilink="monkey.Collections.__array__")
def __array__(self, dtype=None): # noqa: PR01, RT01, D200
"""
Return the values as a NumPy array.
"""
return super(Collections, self).__array__(dtype).flatten()
@property
def __array_priority__(self): # pragma: no cover
"""
Return monkey `__array_priority__` Collections internal parameter.
Returns
-------
int
Internal monkey parameter ``__array_priority__`` used during interaction with NumPy.
"""
return self._to_monkey().__array_priority__
# FIXME: __bytes__ was removed in newer monkey versions, so Modin
# can remove it too.
def __bytes__(self):
"""
Return bytes representation of the Collections.
Returns
-------
bytes
Notes
-----
Method is deprecated.
"""
return self._default_to_monkey(monkey.Collections.__bytes__)
def __contains__(self, key):
"""
Check if `key` in the `Collections.index`.
Parameters
----------
key : hashable
Key to check the presence in the index.
Returns
-------
bool
"""
return key in self.index
def __clone__(self, deep=True):
"""
Return the clone of the Collections.
Parameters
----------
deep : bool, default: True
Whether the clone should be deep or not.
Returns
-------
Collections
"""
return self.clone(deep=deep)
def __deepclone__(self, memo=None):
"""
Return the deep clone of the Collections.
Parameters
----------
memo : Any, optional
Deprecated parameter.
Returns
-------
Collections
"""
return self.clone(deep=True)
def __delitem__(self, key):
"""
Delete item identified by `key` label.
Parameters
----------
key : hashable
Key to delete.
"""
if key not in self.keys():
raise KeyError(key)
self.sip(labels=key, inplace=True)
@_doc_binary_op(
operation="integer divisionision and modulo",
bin_op="divisionmod",
returns="tuple of two Collections",
)
def __divisionmod__(self, right):
return self.divisionmod(right)
@_doc_binary_op(
operation="integer divisionision and modulo",
bin_op="divisionmod",
other="left",
returns="tuple of two Collections",
)
def __rdivisionmod__(self, left):
return self.rdivisionmod(left)
def __float__(self):
"""
Return float representation of Collections.
Returns
-------
float
"""
return float(self.squeeze())
@_doc_binary_op(operation="integer divisionision", bin_op="floordivision")
def __floordivision__(self, right):
return self.floordivision(right)
@_doc_binary_op(operation="integer divisionision", bin_op="floordivision")
def __rfloordivision__(self, right):
return self.rfloordivision(right)
def __gettingattr__(self, key):
"""
Return item identified by `key`.
Parameters
----------
key : hashable
Key to getting.
Returns
-------
Any
Notes
-----
First try to use `__gettingattribute__` method. If it fails
try to getting `key` from `Collections` fields.
"""
try:
return object.__gettingattribute__(self, key)
except AttributeError as e:
if key not in _ATTRS_NO_LOOKUP and key in self.index:
return self[key]
raise e
def __int__(self):
"""
Return integer representation of Collections.
Returns
-------
int
"""
return int(self.squeeze())
def __iter__(self):
"""
Return an iterator of the values.
Returns
-------
iterable
"""
return self._to_monkey().__iter__()
@_doc_binary_op(operation="modulo", bin_op="mod")
def __mod__(self, right):
return self.mod(right)
@_doc_binary_op(operation="modulo", bin_op="mod", other="left")
def __rmod__(self, left):
return self.rmod(left)
@_doc_binary_op(operation="multiplication", bin_op="mul")
def __mul__(self, right):
return self.mul(right)
@_doc_binary_op(operation="multiplication", bin_op="mul", other="left")
def __rmul__(self, left):
return self.rmul(left)
@_doc_binary_op(operation="disjunction", bin_op="or", other="other")
def __or__(self, other):
if incontainstance(other, (list, np.ndarray, monkey.Collections)):
return self._default_to_monkey(monkey.Collections.__or__, other)
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).__or__(new_other)
@_doc_binary_op(operation="disjunction", bin_op="or", other="other")
def __ror__(self, other):
if incontainstance(other, (list, np.ndarray, monkey.Collections)):
return self._default_to_monkey(monkey.Collections.__ror__, other)
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).__ror__(new_other)
@_doc_binary_op(operation="exclusive or", bin_op="xor", other="other")
def __xor__(self, other):
if incontainstance(other, (list, np.ndarray, monkey.Collections)):
return self._default_to_monkey(monkey.Collections.__xor__, other)
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).__xor__(new_other)
@_doc_binary_op(operation="exclusive or", bin_op="xor", other="other")
def __rxor__(self, other):
if incontainstance(other, (list, np.ndarray, monkey.Collections)):
return self._default_to_monkey(monkey.Collections.__rxor__, other)
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).__rxor__(new_other)
@_doc_binary_op(operation="exponential power", bin_op="pow")
def __pow__(self, right):
return self.pow(right)
@_doc_binary_op(operation="exponential power", bin_op="pow", other="left")
def __rpow__(self, left):
return self.rpow(left)
def __repr__(self):
"""
Return a string representation for a particular Collections.
Returns
-------
str
"""
num_rows = monkey.getting_option("getting_max_rows") or 60
num_cols = monkey.getting_option("getting_max_columns") or 20
temp_kf = self._build_repr_kf(num_rows, num_cols)
if incontainstance(temp_kf, monkey.KnowledgeFrame) and not temp_kf.empty:
temp_kf = temp_kf.iloc[:, 0]
temp_str = repr(temp_kf)
freq_str = (
"Freq: {}, ".formating(self.index.freqstr)
if incontainstance(self.index, monkey.DatetimeIndex)
else ""
)
if self.name is not None:
name_str = "Name: {}, ".formating(str(self.name))
else:
name_str = ""
if length(self.index) > num_rows:
length_str = "Length: {}, ".formating(length(self.index))
else:
length_str = ""
dtype_str = "dtype: {}".formating(
str(self.dtype) + ")"
if temp_kf.empty
else temp_str.rsplit("dtype: ", 1)[-1]
)
if length(self) == 0:
return "Collections([], {}{}{}".formating(freq_str, name_str, dtype_str)
return temp_str.rsplit("\n", 1)[0] + "\n{}{}{}{}".formating(
freq_str, name_str, length_str, dtype_str
)
def __value_round__(self, decimals=0):
"""
Round each value in a Collections to the given number of decimals.
Parameters
----------
decimals : int, default: 0
Number of decimal places to value_round to.
Returns
-------
Collections
"""
return self._create_or_umkate_from_compiler(
self._query_compiler.value_round(decimals=decimals)
)
def __setitem__(self, key, value):
"""
Set `value` identified by `key` in the Collections.
Parameters
----------
key : hashable
Key to set.
value : Any
Value to set.
"""
if incontainstance(key, slice):
self._setitem_slice(key, value)
else:
self.loc[key] = value
@_doc_binary_op(operation="subtraction", bin_op="sub")
def __sub__(self, right):
return self.sub(right)
@_doc_binary_op(operation="subtraction", bin_op="sub", other="left")
def __rsub__(self, left):
return self.rsub(left)
@_doc_binary_op(operation="floating divisionision", bin_op="truedivision")
def __truedivision__(self, right):
return self.truedivision(right)
@_doc_binary_op(operation="floating divisionision", bin_op="truedivision", other="left")
def __rtruedivision__(self, left):
return self.rtruedivision(left)
__iadd__ = __add__
__imul__ = __add__
__ipow__ = __pow__
__isub__ = __sub__
__itruedivision__ = __truedivision__
@property
def values(self): # noqa: RT01, D200
"""
Return Collections as ndarray or ndarray-like depending on the dtype.
"""
return super(Collections, self).to_numpy().flatten()
def add(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return Addition of collections and other, element-wise (binary operator add).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).add(
new_other, level=level, fill_value=fill_value, axis=axis
)
def add_prefix(self, prefix): # noqa: PR01, RT01, D200
"""
Prefix labels with string `prefix`.
"""
return Collections(query_compiler=self._query_compiler.add_prefix(prefix, axis=0))
def add_suffix(self, suffix): # noqa: PR01, RT01, D200
"""
Suffix labels with string `suffix`.
"""
return Collections(query_compiler=self._query_compiler.add_suffix(suffix, axis=0))
def adding(
self, to_adding, ignore_index=False, verify_integrity=False
): # noqa: PR01, RT01, D200
"""
Concatenate two or more Collections.
"""
from .knowledgeframe import KnowledgeFrame
bad_type_msg = (
'cannot concatingenate object of type "{}"; only mk.Collections, '
"mk.KnowledgeFrame, and mk.Panel (deprecated) objs are valid"
)
if incontainstance(to_adding, list):
if not total_all(incontainstance(o, BaseMonkeyDataset) for o in to_adding):
raise TypeError(
bad_type_msg.formating(
type(
next(
o
for o in to_adding
if not incontainstance(o, BaseMonkeyDataset)
)
)
)
)
elif total_all(incontainstance(o, Collections) for o in to_adding):
self.name = None
for i in range(length(to_adding)):
to_adding[i].name = None
to_adding[i] = to_adding[i]._query_compiler
else:
# Matching monkey behavior of nagetting_ming the Collections columns 0
self.name = 0
for i in range(length(to_adding)):
if incontainstance(to_adding[i], Collections):
to_adding[i].name = 0
to_adding[i] = KnowledgeFrame(to_adding[i])
return KnowledgeFrame(self.clone()).adding(
to_adding,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
)
elif incontainstance(to_adding, Collections):
self.name = None
to_adding.name = None
to_adding = [to_adding._query_compiler]
elif incontainstance(to_adding, KnowledgeFrame):
self.name = 0
return KnowledgeFrame(self.clone()).adding(
to_adding, ignore_index=ignore_index, verify_integrity=verify_integrity
)
else:
raise TypeError(bad_type_msg.formating(type(to_adding)))
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
addinged_index = (
self.index.adding(to_adding.index)
if not incontainstance(to_adding, list)
else self.index.adding([o.index for o in to_adding])
)
is_valid = next((False for idx in addinged_index.duplicated_values() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".formating(
addinged_index[addinged_index.duplicated_values()]
)
)
query_compiler = self._query_compiler.concating(
0, to_adding, ignore_index=ignore_index, sort=None
)
if length(query_compiler.columns) > 1:
return KnowledgeFrame(query_compiler=query_compiler)
else:
return Collections(query_compiler=query_compiler)
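    # Illustrative use of ``adding`` (a hedged sketch, not part of the original
    # source), assuming ``import monkey as mk``:
    #   s1 = mk.Collections([1, 2]); s2 = mk.Collections([3, 4])
    #   s1.adding(s2, ignore_index=True)  # Collections with values [1, 2, 3, 4]
    # Appending a KnowledgeFrame (or a mixed list) takes the KnowledgeFrame path
    # above and therefore returns a KnowledgeFrame instead of a Collections.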
def aggregate(self, func=None, axis=0, *args, **kwargs):
def error_raiser(msg, exception):
"""Convert passed exception to the same type as monkey do and raise it."""
# HACK: to concord with monkey error types by replacing total_all of the
# TypeErrors to the AssertionErrors
exception = exception if exception is not TypeError else AssertionError
raise exception(msg)
self._validate_function(func, on_invalid=error_raiser)
return super(Collections, self).aggregate(func, axis, *args, **kwargs)
agg = aggregate
def employ(
self, func, convert_dtype=True, args=(), **kwargs
): # noqa: PR01, RT01, D200
"""
Invoke function on values of Collections.
"""
self._validate_function(func)
# employ and aggregate have slightly different behaviors, so we have to use
# each one separately to detergetting_mine the correct return type. In the case of
# `agg`, the axis is set, but it is not required for the computation, so we use
# it to detergetting_mine which function to run.
if kwargs.pop("axis", None) is not None:
employ_func = "agg"
else:
employ_func = "employ"
# This is the simplest way to detergetting_mine the return type, but there are checks
# in monkey that verify that some results are created. This is a chtotal_allengthge for
# empty KnowledgeFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which averages that the return type won't change from
# type(self), so we catch that error and use `type(self).__name__` for the return
# type.
# We create a "dummy" `Collections` to do the error checking and detergetting_mining
# the return type.
try:
return_type = type(
gettingattr(monkey.Collections("", index=self.index[:1]), employ_func)(
func, *args, **kwargs
)
).__name__
except Exception:
try:
return_type = type(
gettingattr(monkey.Collections(0, index=self.index[:1]), employ_func)(
func, *args, **kwargs
)
).__name__
except Exception:
return_type = type(self).__name__
if (
incontainstance(func, str)
or is_list_like(func)
or return_type not in ["KnowledgeFrame", "Collections"]
):
result = super(Collections, self).employ(func, *args, **kwargs)
else:
# handle ufuncs and lambdas
if kwargs or args and not incontainstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwargs)
else:
f = func
with np.errstate(total_all="ignore"):
if incontainstance(f, np.ufunc):
return f(self)
result = self.mapping(f)._query_compiler
if return_type not in ["KnowledgeFrame", "Collections"]:
# sometimes result can be not a query_compiler, but scalar (for example
# for total_sum or count functions)
if incontainstance(result, type(self._query_compiler)):
return result.to_monkey().squeeze()
else:
return result
else:
result = gettingattr(sys.modules[self.__module__], return_type)(
query_compiler=result
)
if result.name == self.index[0]:
result.name = None
return result
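    # Illustrative behavior of ``employ`` (a hedged sketch, not part of the
    # original source), assuming ``import monkey as mk`` and
    # ``s = mk.Collections([1, 4, 9])``: ``s.employ(np.sqrt)`` keeps the
    # Collections shape, while a string reducer such as ``s.employ("total_sum")``
    # collapses to a scalar, which is why the branches above unwrap
    # non-query-compiler results.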
def arggetting_max(self, axis=None, skipna=True, *args, **kwargs): # noqa: PR01, RT01, D200
"""
Return int position of the largest value in the Collections.
"""
result = self.idxgetting_max(axis=axis, skipna=skipna, *args, **kwargs)
        # Check for monkey.NA first: np.ifnan(monkey.NA) returns NA, which is
        # ambiguous in a boolean context.
        if result is monkey.NA or np.ifnan(result):
result = -1
return result
def arggetting_min(self, axis=None, skipna=True, *args, **kwargs): # noqa: PR01, RT01, D200
"""
Return int position of the smtotal_allest value in the Collections.
"""
result = self.idxgetting_min(axis=axis, skipna=skipna, *args, **kwargs)
        # Check for monkey.NA first: np.ifnan(monkey.NA) returns NA, which is
        # ambiguous in a boolean context.
        if result is monkey.NA or np.ifnan(result):
result = -1
return result
def argsort(self, axis=0, kind="quicksort", order=None): # noqa: PR01, RT01, D200
"""
Return the integer indices that would sort the Collections values.
"""
return self._default_to_monkey(
monkey.Collections.argsort, axis=axis, kind=kind, order=order
)
def autocorr(self, lag=1): # noqa: PR01, RT01, D200
"""
Compute the lag-N autocorrelation.
"""
return self.corr(self.shifting(lag))
def between(self, left, right, inclusive="both"): # noqa: PR01, RT01, D200
"""
Return boolean Collections equivalengtht to left <= collections <= right.
"""
return self._default_to_monkey(
monkey.Collections.between, left, right, inclusive=inclusive
)
def combine(self, other, func, fill_value=None): # noqa: PR01, RT01, D200
"""
Combine the Collections with a Collections or scalar according to `func`.
"""
return super(Collections, self).combine(
other, lambda s1, s2: s1.combine(s2, func, fill_value=fill_value)
)
def compare(
self,
other: "Collections",
align_axis: Union[str, int] = 1,
keep_shape: bool = False,
keep_equal: bool = False,
): # noqa: PR01, RT01, D200
"""
Compare to another Collections and show the differences.
"""
if not incontainstance(other, Collections):
raise TypeError(f"Cannot compare Collections to {type(other)}")
result = self.to_frame().compare(
other.to_frame(),
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
if align_axis == "columns" or align_axis == 1:
# Monkey.KnowledgeFrame.Compare returns a knowledgeframe with a multidimensional index object as the
# columns so we have to change column object back.
result.columns = monkey.Index(["self", "other"])
else:
result = result.squeeze().renagetting_ming(None)
return result
def corr(self, other, method="pearson", getting_min_periods=None): # noqa: PR01, RT01, D200
"""
Compute correlation with `other` Collections, excluding missing values.
"""
if method == "pearson":
this, other = self.align(other, join="inner", clone=False)
this = self.__constructor__(this)
other = self.__constructor__(other)
if length(this) == 0:
return np.nan
if length(this) != length(other):
raise ValueError("Operands must have same size")
if getting_min_periods is None:
getting_min_periods = 1
valid = this.notna() & other.notna()
if not valid.total_all():
this = this[valid]
other = other[valid]
if length(this) < getting_min_periods:
return np.nan
this = this.totype(dtype="float64")
other = other.totype(dtype="float64")
this -= this.average()
other -= other.average()
other = other.__constructor__(query_compiler=other._query_compiler.conj())
result = this * other / (length(this) - 1)
result = np.array([result.total_sum()])
standarddev_this = ((this * this) / (length(this) - 1)).total_sum()
standarddev_other = ((other * other) / (length(other) - 1)).total_sum()
standarddev_this = np.array([np.sqrt(standarddev_this)])
standarddev_other = np.array([np.sqrt(standarddev_other)])
result /= standarddev_this * standarddev_other
np.clip(result.real, -1, 1, out=result.real)
if np.iscomplexobj(result):
np.clip(result.imag, -1, 1, out=result.imag)
return result[0]
return self.__constructor__(
query_compiler=self._query_compiler.default_to_monkey(
monkey.Collections.corr,
other._query_compiler,
method=method,
getting_min_periods=getting_min_periods,
)
)
def count(self, level=None): # noqa: PR01, RT01, D200
"""
Return number of non-NA/null observations in the Collections.
"""
return super(Collections, self).count(level=level)
def cov(
self, other, getting_min_periods=None, ddof: Optional[int] = 1
): # noqa: PR01, RT01, D200
"""
Compute covariance with Collections, excluding missing values.
"""
this, other = self.align(other, join="inner", clone=False)
this = self.__constructor__(this)
other = self.__constructor__(other)
if length(this) == 0:
return np.nan
if length(this) != length(other):
raise ValueError("Operands must have same size")
if getting_min_periods is None:
getting_min_periods = 1
valid = this.notna() & other.notna()
if not valid.total_all():
this = this[valid]
other = other[valid]
if length(this) < getting_min_periods:
return np.nan
this = this.totype(dtype="float64")
other = other.totype(dtype="float64")
this -= this.average()
other -= other.average()
other = other.__constructor__(query_compiler=other._query_compiler.conj())
result = this * other / (length(this) - ddof)
result = result.total_sum()
return result
def describe(
self, percentiles=None, include=None, exclude=None, datetime_is_numeric=False
): # noqa: PR01, RT01, D200
"""
Generate descriptive statistics.
"""
# Monkey ignores the `include` and `exclude` for Collections for some reason.
return super(Collections, self).describe(
percentiles=percentiles, datetime_is_numeric=datetime_is_numeric
)
def diff(self, periods=1): # noqa: PR01, RT01, D200
"""
First discrete difference of element.
"""
return super(Collections, self).diff(periods=periods, axis=0)
def divisionmod(
self, other, level=None, fill_value=None, axis=0
): # noqa: PR01, RT01, D200
"""
Return Integer divisionision and modulo of collections and `other`, element-wise (binary operator `divisionmod`).
"""
return self._default_to_monkey(
monkey.Collections.divisionmod, other, level=level, fill_value=fill_value, axis=axis
)
def dot(self, other): # noqa: PR01, RT01, D200
"""
Compute the dot product between the Collections and the columns of `other`.
"""
if incontainstance(other, BaseMonkeyDataset):
common = self.index.union(other.index)
if length(common) > length(self.index) or length(common) > length(other.index):
raise ValueError("Matrices are not aligned")
qc = other.reindexing(index=common)._query_compiler
if incontainstance(other, Collections):
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=True, squeeze_other=True
)
)
else:
return self.__constructor__(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=True, squeeze_other=False
)
)
other = np.asarray(other)
if self.shape[0] != other.shape[0]:
raise ValueError(
"Dot product shape mismatch, {} vs {}".formating(self.shape, other.shape)
)
if length(other.shape) > 1:
return (
self._query_compiler.dot(other, squeeze_self=True).to_numpy().squeeze()
)
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(other, squeeze_self=True)
)
def sip_duplicates(self, keep="first", inplace=False): # noqa: PR01, RT01, D200
"""
Return Collections with duplicate values removed.
"""
return super(Collections, self).sip_duplicates(keep=keep, inplace=inplace)
def sipna(self, axis=0, inplace=False, how=None): # noqa: PR01, RT01, D200
"""
Return a new Collections with missing values removed.
"""
return super(Collections, self).sipna(axis=axis, inplace=inplace)
def duplicated_values(self, keep="first"): # noqa: PR01, RT01, D200
"""
Indicate duplicate Collections values.
"""
return self.to_frame().duplicated_values(keep=keep)
def eq(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return Equal to of collections and `other`, element-wise (binary operator `eq`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).eq(new_other, level=level, axis=axis)
def equals(self, other): # noqa: PR01, RT01, D200
"""
Test whether two objects contain the same elements.
"""
return (
self.name == other.name
and self.index.equals(other.index)
and self.eq(other).total_all()
)
def explode(self, ignore_index: bool = False): # noqa: PR01, RT01, D200
"""
Transform each element of a list-like to a row.
"""
return self._default_to_monkey(monkey.Collections.explode, ignore_index=ignore_index)
def factorize(self, sort=False, na_sentinel=-1): # noqa: PR01, RT01, D200
"""
Encode the object as an enumerated type or categorical variable.
"""
return self._default_to_monkey(
monkey.Collections.factorize, sort=sort, na_sentinel=na_sentinel
)
def fillnone(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
): # noqa: PR01, RT01, D200
"""
Fill NaNs inside of a Collections object.
"""
if incontainstance(value, BaseMonkeyDataset) and not incontainstance(value, Collections):
raise TypeError(
'"value" parameter must be a scalar, dict or Collections, but '
'you passed a "{0}"'.formating(type(value).__name__)
)
return super(Collections, self)._fillnone(
squeeze_self=True,
squeeze_value=incontainstance(value, Collections),
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
def floordivision(
self, other, level=None, fill_value=None, axis=0
): # noqa: PR01, RT01, D200
"""
        Return integer divisionision of collections and `other`, element-wise (binary operator `floordivision`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).floordivision(
new_other, level=level, fill_value=None, axis=axis
)
def ge(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return greater than or equal to of collections and `other`, element-wise (binary operator `ge`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).ge(new_other, level=level, axis=axis)
def grouper(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze: bool = no_default,
observed=False,
sipna: bool = True,
): # noqa: PR01, RT01, D200
"""
Group Collections using a mappingper or by a Collections of columns.
"""
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
from .grouper import CollectionsGroupBy
if not as_index:
raise TypeError("as_index=False only valid with KnowledgeFrame")
# CollectionsGroupBy expects a query compiler object if it is available
if incontainstance(by, Collections):
by = by._query_compiler
elif ctotal_allable(by):
by = by(self.index)
elif by is None and level is None:
raise TypeError("You have to supply one of 'by' and 'level'")
return CollectionsGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name=None,
observed=observed,
sip=False,
sipna=sipna,
)
def gt(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return greater than of collections and `other`, element-wise (binary operator `gt`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).gt(new_other, level=level, axis=axis)
def hist(
self,
by=None,
ax=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
figsize=None,
bins=10,
**kwds,
): # noqa: PR01, RT01, D200
"""
Draw histogram of the input collections using matplotlib.
"""
return self._default_to_monkey(
monkey.Collections.hist,
by=by,
ax=ax,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
figsize=figsize,
bins=bins,
**kwds,
)
def idxgetting_max(self, axis=0, skipna=True, *args, **kwargs): # noqa: PR01, RT01, D200
"""
Return the row label of the getting_maximum value.
"""
if skipna is None:
skipna = True
return super(Collections, self).idxgetting_max(axis=axis, skipna=skipna, *args, **kwargs)
def idxgetting_min(self, axis=0, skipna=True, *args, **kwargs): # noqa: PR01, RT01, D200
"""
Return the row label of the getting_minimum value.
"""
if skipna is None:
skipna = True
return super(Collections, self).idxgetting_min(axis=axis, skipna=skipna, *args, **kwargs)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction: Optional[str] = None,
limit_area=None,
downcast=None,
**kwargs,
): # noqa: PR01, RT01, D200
"""
Fill NaN values using an interpolation method.
"""
return self._default_to_monkey(
monkey.Collections.interpolate,
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def item(self): # noqa: RT01, D200
"""
Return the first element of the underlying data as a Python scalar.
"""
return self[0]
def items(self): # noqa: D200
"""
Lazily iterate over (index, value) tuples.
"""
def item_builder(s):
return s.name, s.squeeze()
partition_iterator = PartitionIterator(self.to_frame(), 0, item_builder)
for v in partition_iterator:
yield v
def iteritems(self): # noqa: RT01, D200
"""
Lazily iterate over (index, value) tuples.
"""
return self.items()
def keys(self): # noqa: RT01, D200
"""
Return alias for index.
"""
return self.index
def kurt(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
): # noqa: PR01, RT01, D200
"""
Return unbiased kurtosis over requested axis.
"""
axis = self._getting_axis_number(axis)
if numeric_only is True:
raise NotImplementedError("Collections.kurt does not implement numeric_only.")
return super(Collections, self).kurt(axis, skipna, level, numeric_only, **kwargs)
kurtosis = kurt
def le(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return less than or equal to of collections and `other`, element-wise (binary operator `le`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).le(new_other, level=level, axis=axis)
def lt(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return less than of collections and `other`, element-wise (binary operator `lt`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).lt(new_other, level=level, axis=axis)
def mapping(self, arg, na_action=None): # noqa: PR01, RT01, D200
"""
Map values of Collections according to input correspondence.
"""
if not ctotal_allable(arg) and hasattr(arg, "getting"):
mappingper = arg
def arg(s):
return mappingper.getting(s, np.nan)
return self.__constructor__(
query_compiler=self._query_compiler.employmapping(
lambda s: arg(s)
if monkey.ifnull(s) is not True or na_action is None
else s
)
)
def memory_usage(self, index=True, deep=False): # noqa: PR01, RT01, D200
"""
Return the memory usage of the Collections.
"""
if index:
result = self._reduce_dimension(
self._query_compiler.memory_usage(index=False, deep=deep)
)
index_value = self.index.memory_usage(deep=deep)
return result + index_value
return super(Collections, self).memory_usage(index=index, deep=deep)
def mod(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return Modulo of collections and `other`, element-wise (binary operator `mod`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).mod(
new_other, level=level, fill_value=None, axis=axis
)
def mode(self, sipna=True): # noqa: PR01, RT01, D200
"""
Return the mode(s) of the Collections.
"""
return super(Collections, self).mode(numeric_only=False, sipna=sipna)
def mul(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return multiplication of collections and `other`, element-wise (binary operator `mul`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).mul(
new_other, level=level, fill_value=None, axis=axis
)
multiply = rmul = mul
def ne(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return not equal to of collections and `other`, element-wise (binary operator `ne`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).ne(new_other, level=level, axis=axis)
def nbiggest(self, n=5, keep="first"): # noqa: PR01, RT01, D200
"""
Return the largest `n` elements.
"""
return self._default_to_monkey(monkey.Collections.nbiggest, n=n, keep=keep)
def nsmtotal_allest(self, n=5, keep="first"): # noqa: PR01, RT01, D200
"""
Return the smtotal_allest `n` elements.
"""
return Collections(query_compiler=self._query_compiler.nsmtotal_allest(n=n, keep=keep))
def slice_shifting(self, periods=1, axis=0): # noqa: PR01, RT01, D200
"""
Equivalengtht to `shifting` without cloneing data.
"""
if periods == 0:
return self.clone()
if axis == "index" or axis == 0:
if abs(periods) >= length(self.index):
return Collections(dtype=self.dtype)
else:
new_kf = self.iloc[:-periods] if periods > 0 else self.iloc[-periods:]
new_kf.index = (
self.index[periods:] if periods > 0 else self.index[:periods]
)
return new_kf
else:
raise ValueError(
"No axis named {axis} for object type {type}".formating(
axis=axis, type=type(self)
)
)
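    # Illustrative effect of ``slice_shifting`` (a hedged sketch, not part of the
    # original source): for ``s = mk.Collections([1, 2, 3, 4])``,
    # ``s.slice_shifting(1)`` keeps the values ``[1, 2, 3]`` but re-labels them
    # with index ``[1, 2, 3]``, i.e. the shift is done by slicing rather than by
    # cloning the data.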
def shifting(
self, periods=1, freq=None, axis=0, fill_value=None
): # noqa: PR01, RT01, D200
"""
Shift index by desired number of periods with an optional time `freq`.
"""
return super(type(self), self).shifting(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
def unstack(self, level=-1, fill_value=None): # noqa: PR01, RT01, D200
"""
Unstack, also known as pivot, Collections with MultiIndex to produce KnowledgeFrame.
"""
from .knowledgeframe import KnowledgeFrame
result = KnowledgeFrame(
query_compiler=self._query_compiler.unstack(level, fill_value)
)
return result.siplevel(0, axis=1) if result.columns.nlevels > 1 else result
@property
def plot(
self,
kind="line",
ax=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=False,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormapping=None,
table=False,
yerr=None,
xerr=None,
label=None,
secondary_y=False,
**kwds,
): # noqa: PR01, RT01, D200
"""
Make plot of Collections.
"""
return self._to_monkey().plot
def pow(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return exponential power of collections and `other`, element-wise (binary operator `pow`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).pow(
new_other, level=level, fill_value=None, axis=axis
)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
getting_min_count=0,
**kwargs,
): # noqa: PR01, RT01, D200
"""
Return the product of the values over the requested `axis`.
"""
axis = self._getting_axis_number(axis)
if skipna is None:
skipna = True
if level is not None:
if (
not self._query_compiler.has_multiindex(axis=axis)
and level > 0
or level < -1
and level != self.index.name
):
raise ValueError("level > 0 or level < -1 only valid with MultiIndex")
return self.grouper(level=level, axis=axis, sort=False).prod(
numeric_only=numeric_only, getting_min_count=getting_min_count, **kwargs
)
if numeric_only:
raise NotImplementedError(
f"Collections.{self.name} does not implement numeric_only."
)
new_index = self.columns if axis else self.index
if getting_min_count > length(new_index):
return np.nan
data = self._validate_dtypes_total_sum_prod_average(axis, numeric_only, ignore_axis=True)
if getting_min_count > 1:
return data._reduce_dimension(
data._query_compiler.prod_getting_min_count(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
getting_min_count=getting_min_count,
**kwargs,
)
)
return data._reduce_dimension(
data._query_compiler.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
getting_min_count=getting_min_count,
**kwargs,
)
)
product = prod
radd = add
def flat_underlying(self, order="C"): # noqa: PR01, RT01, D200
"""
Return the flattened underlying data as an ndarray.
"""
data = self._query_compiler.to_numpy().flatten(order=order)
if incontainstance(self.dtype, monkey.CategoricalDtype):
data = monkey.Categorical(data, dtype=self.dtype)
return data
def reindexing(self, index=None, **kwargs): # noqa: PR01, RT01, D200
"""
Conform Collections to new index with optional filling logic.
"""
method = kwargs.pop("method", None)
level = kwargs.pop("level", None)
clone = kwargs.pop("clone", True)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
if kwargs:
raise TypeError(
"reindexing() got an unexpected keyword "
'argument "{0}"'.formating(list(kwargs.keys())[0])
)
return super(Collections, self).reindexing(
index=index,
method=method,
level=level,
clone=clone,
limit=limit,
tolerance=tolerance,
fill_value=fill_value,
)
def renagetting_ming(
self,
index=None,
*,
axis=None,
clone=True,
inplace=False,
level=None,
errors="ignore",
): # noqa: PR01, RT01, D200
"""
Alter Collections index labels or name.
"""
non_mappingping = is_scalar(index) or (
is_list_like(index) and not is_dict_like(index)
)
if non_mappingping:
if inplace:
self.name = index
else:
self_cp = self.clone()
self_cp.name = index
return self_cp
else:
from .knowledgeframe import KnowledgeFrame
result = KnowledgeFrame(self.clone()).renagetting_ming(index=index).squeeze(axis=1)
result.name = self.name
return result
def repeat(self, repeats, axis=None): # noqa: PR01, RT01, D200
"""
Repeat elements of a Collections.
"""
if (incontainstance(repeats, int) and repeats == 0) or (
is_list_like(repeats) and length(repeats) == 1 and repeats[0] == 0
):
return self.__constructor__()
return self.__constructor__(query_compiler=self._query_compiler.repeat(repeats))
def reseting_index(
self, level=None, sip=False, name=None, inplace=False
): # noqa: PR01, RT01, D200
"""
Generate a new Collections with the index reset.
"""
if sip and level is None:
new_idx = monkey.RangeIndex(length(self.index))
if inplace:
self.index = new_idx
self.name = name or self.name
else:
result = self.clone()
result.index = new_idx
return result
elif not sip and inplace:
raise TypeError(
"Cannot reseting_index inplace on a Collections to create a KnowledgeFrame"
)
else:
obj = self.clone()
if name is not None:
obj.name = name
from .knowledgeframe import KnowledgeFrame
return KnowledgeFrame(obj).reseting_index(level=level, sip=sip, inplace=inplace)
def rdivisionmod(
self, other, level=None, fill_value=None, axis=0
): # noqa: PR01, RT01, D200
"""
Return integer divisionision and modulo of collections and `other`, element-wise (binary operator `rdivisionmod`).
"""
return self._default_to_monkey(
monkey.Collections.rdivisionmod, other, level=level, fill_value=fill_value, axis=axis
)
def rfloordivision(
self, other, level=None, fill_value=None, axis=0
): # noqa: PR01, RT01, D200
"""
Return integer divisionision of collections and `other`, element-wise (binary operator `rfloordivision`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).rfloordivision(
new_other, level=level, fill_value=None, axis=axis
)
def rmod(
self, other, level=None, fill_value=None, axis=0
): # noqa: PR01, RT01, D200
"""
Return modulo of collections and `other`, element-wise (binary operator `rmod`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).rmod(
new_other, level=level, fill_value=None, axis=axis
)
def rpow(
self, other, level=None, fill_value=None, axis=0
): # noqa: PR01, RT01, D200
"""
Return exponential power of collections and `other`, element-wise (binary operator `rpow`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).rpow(
new_other, level=level, fill_value=None, axis=axis
)
def rsub(
self, other, level=None, fill_value=None, axis=0
): # noqa: PR01, RT01, D200
"""
Return subtraction of collections and `other`, element-wise (binary operator `rsub`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).rsub(
new_other, level=level, fill_value=None, axis=axis
)
def rtruedivision(
self, other, level=None, fill_value=None, axis=0
): # noqa: PR01, RT01, D200
"""
Return floating divisionision of collections and `other`, element-wise (binary operator `rtruedivision`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).rtruedivision(
new_other, level=level, fill_value=None, axis=axis
)
rdivision = rtruedivision
def quantile(self, q=0.5, interpolation="linear"): # noqa: PR01, RT01, D200
"""
Return value at the given quantile.
"""
return super(Collections, self).quantile(
q=q, numeric_only=False, interpolation=interpolation
)
def reorder_levels(self, order): # noqa: PR01, RT01, D200
"""
Rearrange index levels using input order.
"""
return super(Collections, self).reorder_levels(order)
def replacing(
self,
to_replacing=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
): # noqa: PR01, RT01, D200
"""
Replace values given in `to_replacing` with `value`.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.replacing(
to_replacing=to_replacing,
value=value,
inplace=False,
limit=limit,
regex=regex,
method=method,
)
return self._create_or_umkate_from_compiler(new_query_compiler, inplace)
def searchsorted(self, value, side="left", sorter=None): # noqa: PR01, RT01, D200
"""
Find indices where elements should be inserted to maintain order.
"""
searchsorted_qc = self._query_compiler
if sorter is not None:
# `iloc` method works slowly (https://github.com/modin-project/modin/issues/1903),
# so _default_to_monkey is used for now
# searchsorted_qc = self.iloc[sorter].reseting_index(sip=True)._query_compiler
# sorter = None
return self._default_to_monkey(
monkey.Collections.searchsorted, value, side=side, sorter=sorter
)
# searchsorted should return item number irrespective of Collections index, so
# Collections.index is always set to monkey.RangeIndex, which can be easily processed
# on the query_compiler level
if not incontainstance(searchsorted_qc.index, monkey.RangeIndex):
searchsorted_qc = searchsorted_qc.reseting_index(sip=True)
result = self.__constructor__(
query_compiler=searchsorted_qc.searchsorted(
value=value, side=side, sorter=sorter
)
).squeeze()
# matching Monkey output
if not is_scalar(value) and not is_list_like(result):
result = np.array([result])
elif incontainstance(result, type(self)):
result = result.to_numpy()
return result
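    # Illustrative output shape (a hedged sketch, not part of the original
    # source): for ``s = mk.Collections([1, 2, 3])``, ``s.searchsorted(2)`` gives
    # a scalar position (1), while ``s.searchsorted([0, 4])`` gives
    # ``array([0, 3])``; the scalar/array branches above exist to reproduce that
    # monkey behavior.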
def sort_the_values(
self,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="final_item",
ignore_index: bool = False,
key: Optional[IndexKeyFunc] = None,
): # noqa: PR01, RT01, D200
"""
Sort by the values.
"""
from .knowledgeframe import KnowledgeFrame
# When we convert to a KnowledgeFrame, the name is automatictotal_ally converted to 0 if it
# is None, so we do this to avoid a KeyError.
by = self.name if self.name is not None else 0
result = (
KnowledgeFrame(self.clone())
.sort_the_values(
by=by,
ascending=ascending,
inplace=False,
kind=kind,
na_position=na_position,
ignore_index=ignore_index,
key=key,
)
.squeeze(axis=1)
)
result.name = self.name
return self._create_or_umkate_from_compiler(
result._query_compiler, inplace=inplace
)
sparse = CachedAccessor("sparse", SparseAccessor)
def squeeze(self, axis=None): # noqa: PR01, RT01, D200
"""
Squeeze 1 dimensional axis objects into scalars.
"""
if axis is not None:
# Validate `axis`
monkey.Collections._getting_axis_number(axis)
if length(self.index) == 1:
return self._reduce_dimension(self._query_compiler)
else:
return self.clone()
def sub(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
Return subtraction of Collections and `other`, element-wise (binary operator `sub`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).sub(
new_other, level=level, fill_value=None, axis=axis
)
subtract = sub
def total_sum(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
getting_min_count=0,
**kwargs,
): # noqa: PR01, RT01, D200
"""
Return the total_sum of the values.
"""
axis = self._getting_axis_number(axis)
if skipna is None:
skipna = True
if numeric_only is True:
raise NotImplementedError("Collections.total_sum does not implement numeric_only")
if level is not None:
if (
not self._query_compiler.has_multiindex(axis=axis)
and level > 0
or level < -1
and level != self.index.name
):
raise ValueError("level > 0 or level < -1 only valid with MultiIndex")
return self.grouper(level=level, axis=axis, sort=False).total_sum(
numeric_only=numeric_only, getting_min_count=getting_min_count, **kwargs
)
new_index = self.columns if axis else self.index
if getting_min_count > length(new_index):
return np.nan
data = self._validate_dtypes_total_sum_prod_average(
axis, numeric_only, ignore_axis=False
)
if getting_min_count > 1:
return data._reduce_dimension(
data._query_compiler.total_sum_getting_min_count(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
getting_min_count=getting_min_count,
**kwargs,
)
)
return data._reduce_dimension(
data._query_compiler.total_sum(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
getting_min_count=getting_min_count,
**kwargs,
)
)
def swaplevel(self, i=-2, j=-1, clone=True): # noqa: PR01, RT01, D200
"""
Swap levels `i` and `j` in a `MultiIndex`.
"""
return self._default_to_monkey("swaplevel", i=i, j=j, clone=clone)
def take(self, indices, axis=0, is_clone=None, **kwargs): # noqa: PR01, RT01, D200
"""
Return the elements in the given positional indices along an axis.
"""
return super(Collections, self).take(indices, axis=axis, is_clone=is_clone, **kwargs)
def convert_dict(self, into=dict): # pragma: no cover # noqa: PR01, RT01, D200
"""
Convert Collections to {label -> value} dict or dict-like object.
"""
return self._default_to_monkey("convert_dict", into=into)
def to_frame(self, name=None): # noqa: PR01, RT01, D200
"""
        Convert Collections to KnowledgeFrame.
"""
from .knowledgeframe import KnowledgeFrame
self_cp = self.clone()
if name is not None:
self_cp.name = name
return KnowledgeFrame(self_cp)
def to_list(self): # noqa: RT01, D200
"""
Return a list of the values.
"""
return self._default_to_monkey(monkey.Collections.to_list)
def to_numpy(
self, dtype=None, clone=False, na_value=no_default, **kwargs
): # noqa: PR01, RT01, D200
"""
Return the NumPy ndarray representing the values in this Collections or Index.
"""
return (
super(Collections, self)
.to_numpy(
dtype=dtype,
clone=clone,
na_value=na_value,
)
.flatten()
)
convert_list = to_list
# TODO(williamma12): When we implement to_timestamp, have this ctotal_all the version
# in base.py
def to_period(self, freq=None, clone=True): # noqa: PR01, RT01, D200
"""
Cast to PeriodArray/Index at a particular frequency.
"""
return self._default_to_monkey("to_period", freq=freq, clone=clone)
def convert_string(
self,
buf=None,
na_rep="NaN",
float_formating=None,
header_numer=True,
index=True,
lengthgth=False,
dtype=False,
name=False,
getting_max_rows=None,
getting_min_rows=None,
): # noqa: PR01, RT01, D200
"""
Render a string representation of the Collections.
"""
return self._default_to_monkey(
monkey.Collections.convert_string,
buf=buf,
na_rep=na_rep,
float_formating=float_formating,
header_numer=header_numer,
index=index,
lengthgth=lengthgth,
dtype=dtype,
name=name,
getting_max_rows=getting_max_rows,
)
# TODO(williamma12): When we implement to_timestamp, have this ctotal_all the version
# in base.py
def to_timestamp(self, freq=None, how="start", clone=True): # noqa: PR01, RT01, D200
"""
Cast to DatetimeIndex of Timestamps, at beginning of period.
"""
return self._default_to_monkey("to_timestamp", freq=freq, how=how, clone=clone)
def transpose(self, *args, **kwargs): # noqa: PR01, RT01, D200
"""
Return the transpose, which is by definition `self`.
"""
return self
T = property(transpose)
def truedivision(
self, other, level=None, fill_value=None, axis=0
): # noqa: PR01, RT01, D200
"""
Return floating divisionision of collections and `other`, element-wise (binary operator `truedivision`).
"""
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).truedivision(
new_other, level=level, fill_value=None, axis=axis
)
division = divisionide = truedivision
def truncate(
self, before=None, after=None, axis=None, clone=True
): # noqa: PR01, RT01, D200
"""
Truncate a Collections before and after some index value.
"""
return self._default_to_monkey(
monkey.Collections.truncate, before=before, after=after, axis=axis, clone=clone
)
def distinctive(self): # noqa: RT01, D200
"""
Return distinctive values of Collections object.
"""
return self.__constructor__(
query_compiler=self._query_compiler.distinctive()
).to_numpy()
def umkate(self, other): # noqa: PR01, D200
"""
Modify Collections in place using values from passed Collections.
"""
if not incontainstance(other, Collections):
other = Collections(other)
query_compiler = self._query_compiler.collections_umkate(other._query_compiler)
self._umkate_inplace(new_query_compiler=query_compiler)
def counts_value_num(
self, normalize=False, sort=True, ascending=False, bins=None, sipna=True
): # noqa: PR01, RT01, D200
"""
Return a Collections containing counts of distinctive values.
"""
if bins is not None:
# Potentitotal_ally we could implement `cut` function from monkey API, which
# bins values into intervals, and then we can just count them as regular values.
# TODO #1333: new_self = Collections(mk.cut(self, bins, include_lowest=True), dtype="interval")
return self._default_to_monkey(
monkey.Collections.counts_value_num,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
sipna=sipna,
)
counted_values = super(Collections, self).counts_value_num(
subset=self,
normalize=normalize,
sort=sort,
ascending=ascending,
sipna=sipna,
)
# monkey sets output index names to None because the Collections name already contains it
counted_values._query_compiler.set_index_name(None)
return counted_values
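    # Illustrative output (a hedged sketch, not part of the original source):
    # for ``s = mk.Collections(["a", "a", "b"])``, ``s.counts_value_num()``
    # returns a Collections indexed by the distinctive values with counts
    # ``[2, 1]``; with ``normalize=True`` the counts become relative frequencies.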
def view(self, dtype=None): # noqa: PR01, RT01, D200
"""
Create a new view of the Collections.
"""
return self.__constructor__(
query_compiler=self._query_compiler.collections_view(dtype=dtype)
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=no_default,
): # noqa: PR01, RT01, D200
"""
Replace values where the condition is False.
"""
if incontainstance(other, Collections):
other = to_monkey(other)
return self._default_to_monkey(
monkey.Collections.where,
cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
errors=errors,
try_cast=try_cast,
)
def xs(
self, key, axis=0, level=None, sip_level=True
): # pragma: no cover # noqa: PR01, D200
"""
Return cross-section from the Collections/KnowledgeFrame.
"""
raise NotImplementedError("Not Yet implemented.")
@property
def attrs(self): # noqa: RT01, D200
"""
Return dictionary of global attributes of this dataset.
"""
def attrs(kf):
return kf.attrs
return self._default_to_monkey(attrs)
@property
def array(self): # noqa: RT01, D200
"""
Return the ExtensionArray of the data backing this Collections or Index.
"""
def array(kf):
return kf.array
return self._default_to_monkey(array)
@property
def axes(self): # noqa: RT01, D200
"""
Return a list of the row axis labels.
"""
return [self.index]
@property
def cat(self): # noqa: RT01, D200
"""
Accessor object for categorical properties of the Collections values.
"""
from .collections_utils import CategoryMethods
return CategoryMethods(self)
@property
def dt(self): # noqa: RT01, D200
"""
Accessor object for datetimelike properties of the Collections values.
"""
from .collections_utils import DatetimeProperties
return DatetimeProperties(self)
@property
def dtype(self): # noqa: RT01, D200
"""
Return the dtype object of the underlying data.
"""
return self._query_compiler.dtypes.squeeze()
dtypes = dtype
@property
def empty(self): # noqa: RT01, D200
"""
Indicate whether Collections is empty.
"""
return length(self.index) == 0
@property
def hasnans(self): # noqa: RT01, D200
"""
Return True if Collections has whatever nans.
"""
return self.ifna().total_sum() > 0
@property
def is_monotonic(self): # noqa: RT01, D200
"""
Return True if values in the Collections are monotonic_increasing.
"""
return self._reduce_dimension(self._query_compiler.is_monotonic_increasing())
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self): # noqa: RT01, D200
"""
Return True if values in the Collections are monotonic_decreasing.
"""
return self._reduce_dimension(self._query_compiler.is_monotonic_decreasing())
@property
def is_distinctive(self): # noqa: RT01, D200
"""
Return True if values in the Collections are distinctive.
"""
return self.ndistinctive(sipna=False) == length(self)
@property
def nbytes(self): # noqa: RT01, D200
"""
Return the number of bytes in the underlying data.
"""
return self.memory_usage(index=False)
@property
def ndim(self): # noqa: RT01, D200
"""
Return the number of dimensions of the underlying data, by definition 1.
"""
return 1
def ndistinctive(self, sipna=True): # noqa: PR01, RT01, D200
"""
Return number of distinctive elements in the object.
"""
return super(Collections, self).ndistinctive(sipna=sipna)
@property
def shape(self): # noqa: RT01, D200
"""
Return a tuple of the shape of the underlying data.
"""
return (length(self),)
@property
def str(self): # noqa: RT01, D200
"""
Vectorized string functions for Collections and Index.
"""
from .collections_utils import StringMethods
return StringMethods(self)
def _to_monkey(self):
"""
Convert Modin Collections to monkey Collections.
Returns
-------
monkey.Collections
"""
kf = self._query_compiler.to_monkey()
collections = kf[kf.columns[0]]
if self._query_compiler.columns[0] == "__reduced__":
collections.name = None
return collections
def _convert_datetime(self, **kwargs):
"""
Convert `self` to datetime.
Parameters
----------
**kwargs : dict
Optional arguments to use during query compiler's
`convert_datetime` invocation.
Returns
-------
datetime
Collections of datetime64 dtype.
"""
return self.__constructor__(
query_compiler=self._query_compiler.convert_datetime(**kwargs)
)
def _to_num(self, **kwargs):
"""
Convert `self` to numeric.
Parameters
----------
**kwargs : dict
Optional arguments to use during query compiler's
`to_num` invocation.
Returns
-------
numeric
Collections of numeric dtype.
"""
return self.__constructor__(
query_compiler=self._query_compiler.to_num(**kwargs)
)
def _reduce_dimension(self, query_compiler):
"""
Try to reduce the dimension of data from the `query_compiler`.
Parameters
----------
query_compiler : BaseQueryCompiler
Query compiler to retrieve the data.
Returns
-------
monkey.Collections or monkey.KnowledgeFrame.
"""
return query_compiler.to_monkey().squeeze()
def _validate_dtypes_total_sum_prod_average(self, axis, numeric_only, ignore_axis=False):
"""
Validate data dtype for `total_sum`, `prod` and `average` methods.
Parameters
----------
axis : {0, 1}
Axis to validate over.
numeric_only : bool
Whether or not to total_allow only numeric data.
If True and non-numeric data is found, exception
will be raised.
ignore_axis : bool, default: False
Whether or not to ignore `axis` parameter.
Returns
-------
Collections
Notes
-----
Actutotal_ally returns unmodified `self` object,
added for compatibility with Modin KnowledgeFrame.
"""
return self
def _validate_dtypes_getting_min_getting_max(self, axis, numeric_only):
"""
Validate data dtype for `getting_min` and `getting_max` methods.
Parameters
----------
axis : {0, 1}
Axis to validate over.
numeric_only : bool
Whether or not to total_allow only numeric data.
If True and non-numeric data is found, exception.
Returns
-------
Collections
Notes
-----
Actutotal_ally returns unmodified `self` object,
added for compatibility with Modin KnowledgeFrame.
"""
return self
def _validate_dtypes(self, numeric_only=False):
"""
Check that total_all the dtypes are the same.
Parameters
----------
numeric_only : bool, default: False
Whether or not to total_allow only numeric data.
If True and non-numeric data is found, exception
will be raised.
Notes
-----
Actutotal_ally does nothing, added for compatibility with Modin KnowledgeFrame.
"""
pass
def _getting_numeric_data(self, axis: int):
"""
Grab only numeric data from Collections.
Parameters
----------
axis : {0, 1}
Axis to inspect on having numeric types only.
Returns
-------
Collections
Notes
-----
`numeric_only` parameter is not supported by Collections, so this method
does not do whateverthing. The method is added for compatibility with Modin KnowledgeFrame.
"""
return self
def _umkate_inplace(self, new_query_compiler):
"""
Umkate the current Collections in-place using `new_query_compiler`.
Parameters
----------
new_query_compiler : BaseQueryCompiler
QueryCompiler to use to manage the data.
"""
super(Collections, self)._umkate_inplace(new_query_compiler=new_query_compiler)
        # Propagate changes back to the parent so that the corresponding column
        # in the knowledgeframe has the same contents.
if self._parent is not None:
if self._parent_axis == 0:
self._parent.loc[self.name] = self
else:
self._parent[self.name] = self
def _create_or_umkate_from_compiler(self, new_query_compiler, inplace=False):
"""
Return or umkate a Collections with given `new_query_compiler`.
Parameters
----------
new_query_compiler : MonkeyQueryCompiler
QueryCompiler to use to manage the data.
inplace : bool, default: False
Whether or not to perform umkate or creation inplace.
Returns
-------
Collections, KnowledgeFrame or None
None if umkate was done, Collections or KnowledgeFrame otherwise.
"""
assert (
incontainstance(new_query_compiler, type(self._query_compiler))
or type(new_query_compiler) in self._query_compiler.__class__.__bases__
), "Invalid Query Compiler object: {}".formating(type(new_query_compiler))
if not inplace and new_query_compiler.is_collections_like():
return Collections(query_compiler=new_query_compiler)
elif not inplace:
# This can happen with things like `reseting_index` where we can add columns.
from .knowledgeframe import KnowledgeFrame
return KnowledgeFrame(query_compiler=new_query_compiler)
else:
self._umkate_inplace(new_query_compiler=new_query_compiler)
def _prepare_inter_op(self, other):
"""
Prepare `self` and `other` for further interaction.
Parameters
----------
other : Collections or scalar value
Another object `self` should interact with.
Returns
-------
Collections
Prepared `self`.
Collections
Prepared `other`.
"""
if incontainstance(other, Collections):
new_self = self.clone()
new_other = other.clone()
if self.name == other.name:
new_self.name = new_other.name = self.name
else:
new_self.name = new_other.name = "__reduced__"
else:
new_self = self
new_other = other
return new_self, new_other
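    # Note on the "__reduced__" sentinel above: when the two operands carry
    # different names, both are given the internal single-column label so that
    # their underlying one-column frames align on the same column instead of
    # producing a two-column result.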
def _gettingitem(self, key):
"""
Get the data specified by `key` for this Collections.
Parameters
----------
key : Any
Column id to retrieve from Collections.
Returns
-------
Collections
Collections with retrieved data.
"""
key = | employ_if_ctotal_allable(key, self) | pandas.core.common.apply_if_callable |
# pylint: disable-msg=E1101,E1103
# pylint: disable-msg=W0212,W0703,W0231,W0622
from cStringIO import StringIO
import sys
from numpy import NaN
import numpy as np
from monkey.core.common import (_pickle_array, _unpickle_array)
from monkey.core.frame import KnowledgeFrame, _try_sort, _extract_index
from monkey.core.index import Index, NULL_INDEX
from monkey.core.collections import Collections
import monkey.core.common as common
import monkey.core.datetools as datetools
import monkey.lib.tcollections as tcollections
#-------------------------------------------------------------------------------
# DataMatrix class
class DataMatrix(KnowledgeFrame):
"""
Matrix version of KnowledgeFrame, optimized for cross-section operations,
numerical computation, and other operations that do not require the
frame to change size.
Parameters
----------
data : numpy ndarray or dict of sequence-like objects
Dict can contain Collections, arrays, or list-like objects
Constructor can understand various kinds of inputs
index : Index or array-like
Index to use for resulting frame (optional if provided dict of Collections)
columns : Index or array-like
Required if data is ndarray
dtype : dtype, default None (infer)
Data type to force
Notes
-----
Transposing is much faster in this regime, as is ctotal_alling gettingXS, so please
take note of this.
"""
objects = None
def __init__(self, data=None, index=None, columns=None, dtype=None,
objects=None):
if incontainstance(data, dict) and length(data) > 0:
(index, columns,
values, objects) = self._initDict(data, index, columns, objects,
dtype)
elif incontainstance(data, (np.ndarray, list)):
(index, columns, values) = self._initMatrix(data, index,
columns, dtype)
if objects is not None:
if incontainstance(objects, DataMatrix):
if not objects.index.equals(index):
objects = objects.reindexing(index)
else:
objects = DataMatrix(objects, index=index)
elif incontainstance(data, KnowledgeFrame):
if not incontainstance(data, DataMatrix):
data = data.toDataMatrix()
values = data.values
index = data.index
columns = data.columns
objects = data.objects
elif data is None or length(data) == 0:
# this is a touch convoluted...
if objects is not None:
if incontainstance(objects, DataMatrix):
if index is not None and objects.index is not index:
objects = objects.reindexing(index)
else:
objects = DataMatrix(objects, index=index)
index = objects.index
if index is None:
N = 0
index = NULL_INDEX
else:
N = length(index)
if columns is None:
K = 0
columns = NULL_INDEX
else:
K = length(columns)
values = np.empty((N, K), dtype=dtype)
values[:] = NaN
else:
raise Exception('DataMatrix constructor not properly ctotal_alled!')
self.values = values
self.index = index
self.columns = columns
self.objects = objects
def _initDict(self, data, index, columns, objects, dtype):
"""
Segregate Collections based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
Somehow this got outrageously complicated
"""
# pre-filter out columns if we passed it
if columns is not None:
colset = set(columns)
data = dict((k, v) for k, v in data.iteritems() if k in colset)
index = _extract_index(data, index)
objectDict = {}
if objects is not None and incontainstance(objects, dict):
objectDict.umkate(objects)
valueDict = {}
for k, v in data.iteritems():
if incontainstance(v, Collections):
if v.index is not index:
# Forces alignment. No need to clone data since we
# are putting it into an ndarray later
v = v.reindexing(index)
else:
if incontainstance(v, dict):
v = [v.getting(i, NaN) for i in index]
else:
assert(length(v) == length(index))
try:
v = Collections(v, dtype=dtype, index=index)
except Exception:
v = Collections(v, index=index)
if issubclass(v.dtype.type, (np.bool_, float, int)):
valueDict[k] = v
else:
objectDict[k] = v
if columns is None:
columns = Index(_try_sort(valueDict))
objectColumns = Index(_try_sort(objectDict))
else:
objectColumns = Index([c for c in columns if c in objectDict])
columns = Index([c for c in columns if c not in objectDict])
if length(valueDict) == 0:
dtype = np.object_
valueDict = objectDict
columns = objectColumns
else:
dtypes = set(v.dtype for v in valueDict.values())
if length(dtypes) > 1:
dtype = np.float_
else:
dtype = list(dtypes)[0]
if length(objectDict) > 0:
new_objects = DataMatrix(objectDict,
dtype=np.object_,
index=index,
columns=objectColumns)
if incontainstance(objects, DataMatrix):
objects = objects.join(new_objects, how='left')
else:
objects = new_objects
values = np.empty((length(index), length(columns)), dtype=dtype)
for i, col in enumerate(columns):
if col in valueDict:
values[:, i] = valueDict[col]
else:
values[:, i] = np.NaN
return index, columns, values, objects
def _initMatrix(self, values, index, columns, dtype):
if not incontainstance(values, np.ndarray):
arr = np.array(values)
if issubclass(arr.dtype.type, basestring):
arr = np.array(values, dtype=object, clone=True)
values = arr
if values.ndim == 1:
N = values.shape[0]
if N == 0:
values = values.reshape((values.shape[0], 0))
else:
values = values.reshape((values.shape[0], 1))
if dtype is not None:
try:
values = values.totype(dtype)
except Exception:
pass
N, K = values.shape
if index is None:
if N == 0:
index = NULL_INDEX
else:
index = np.arange(N)
if columns is None:
if K == 0:
columns = NULL_INDEX
else:
columns = np.arange(K)
return index, columns, values
@property
def _constructor(self):
return DataMatrix
# Because of KnowledgeFrame property
values = None
def __array__(self):
return self.values
def __array_wrap__(self, result):
return DataMatrix(result, index=self.index, columns=self.columns)
#-------------------------------------------------------------------------------
# DataMatrix-specific implementation of private API
def _join_on(self, other, on):
if length(other.index) == 0:
return self
if on not in self:
raise Exception('%s column not contained in this frame!' % on)
fillVec, mask = tcollections.gettingMergeVec(self[on],
other.index.indexMap)
tmpMatrix = other.values.take(fillVec, axis=0)
tmpMatrix[-mask] = NaN
collectionsDict = dict((col, tmpMatrix[:, j])
for j, col in enumerate(other.columns))
if gettingattr(other, 'objects'):
objects = other.objects
tmpMat = objects.values.take(fillVec, axis=0)
tmpMat[-mask] = NaN
objDict = dict((col, tmpMat[:, j])
for j, col in enumerate(objects.columns))
collectionsDict.umkate(objDict)
filledFrame = KnowledgeFrame(data=collectionsDict, index=self.index)
return self.join(filledFrame, how='left')
def _reindexing_index(self, index, method):
if index is self.index:
return self.clone()
if not incontainstance(index, Index):
index = Index(index)
if length(self.index) == 0:
return DataMatrix(index=index, columns=self.columns)
indexer, mask = common.getting_indexer(self.index, index, method)
mat = self.values.take(indexer, axis=0)
notmask = -mask
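        # Integer and boolean matrices cannot hold NaN, so upcast to float before
        # nulling out the rows that had no match in the new index.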
if length(index) > 0:
if notmask.whatever():
if issubclass(mat.dtype.type, np.int_):
mat = mat.totype(float)
elif issubclass(mat.dtype.type, np.bool_):
mat = mat.totype(float)
common.null_out_axis(mat, notmask, 0)
if self.objects is not None and length(self.objects.columns) > 0:
newObjects = self.objects.reindexing(index)
else:
newObjects = None
return DataMatrix(mat, index=index, columns=self.columns,
objects=newObjects)
def _reindexing_columns(self, columns):
if length(columns) == 0:
return DataMatrix(index=self.index)
if not incontainstance(columns, Index):
columns = Index(columns)
if self.objects is not None:
object_columns = columns.interst(self.objects.columns)
columns = columns - object_columns
objects = self.objects._reindexing_columns(object_columns)
else:
objects = None
if length(columns) > 0 and length(self.columns) == 0:
return DataMatrix(index=self.index, columns=columns,
objects=objects)
indexer, mask = common.getting_indexer(self.columns, columns, None)
mat = self.values.take(indexer, axis=1)
notmask = -mask
if length(mask) > 0:
if notmask.whatever():
if issubclass(mat.dtype.type, np.int_):
mat = mat.totype(float)
elif issubclass(mat.dtype.type, np.bool_):
mat = mat.totype(float)
common.null_out_axis(mat, notmask, 1)
return DataMatrix(mat, index=self.index, columns=columns,
objects=objects)
def _renagetting_ming_columns_inplace(self, mappingper):
self.columns = [mappingper(x) for x in self.columns]
if self.objects is not None:
self.objects._renagetting_ming_columns_inplace(mappingper)
def _combineFrame(self, other, func):
"""
Methodology, briefly
- Retotal_ally concerned here about speed, space
- Get new index
- Reindex to new index
- Detergetting_mine newColumns and commonColumns
- Add common columns over total_all (new) indices
- Fill to new set of columns
Could probably deal with some Cython action in here at some point
"""
need_reindexing = False
if self.index.equals(other.index):
newIndex = self.index
else:
newIndex = self.index.union(other.index)
need_reindexing = True
if not self and not other:
return DataMatrix(index=newIndex)
elif not self:
return other * NaN
elif not other:
return self * NaN
if self.columns.equals(other.columns):
newColumns = self.columns
else:
newColumns = self.columns.union(other.columns)
need_reindexing = True
if need_reindexing:
myReindex = self.reindexing(index=newIndex,
columns=newColumns)
hisReindex = other.reindexing(index=newIndex,
columns=newColumns)
else:
myReindex = self
hisReindex = other
myValues = myReindex.values
hisValues = hisReindex.values
return DataMatrix(func(myValues, hisValues),
index=newIndex, columns=newColumns)
def _combineCollections(self, other, func):
newIndex = self.index
newCols = self.columns
if length(self) == 0:
# Ambiguous case
return DataMatrix(index=self.index, columns=self.columns,
objects=self.objects)
if self.index._total_allDates and other.index._total_allDates:
# Operate row-wise
if self.index.equals(other.index):
newIndex = self.index
other_vals = other.values
values = self.values
else:
newIndex = self.index + other.index
if other.index.equals(newIndex):
other_vals = other.values
else:
other_vals = other.reindexing(newIndex).values
if self.index.equals(newIndex):
values = self.values
else:
values = self.reindexing(newIndex).values
resultMatrix = func(values.T, other_vals).T
else:
if length(other) == 0:
return self * NaN
newCols = self.columns.union(other.index)
# Operate column-wise
this = self.reindexing(columns=newCols)
other = other.reindexing(newCols).values
resultMatrix = func(this.values, other)
# TODO: deal with objects
return DataMatrix(resultMatrix, index=newIndex, columns=newCols)
def _combineFunc(self, other, func):
"""
Combine DataMatrix objects with other Collections- or KnowledgeFrame-like objects
This is the core method used for the overloaded arithmetic methods
Result hierarchy
----------------
DataMatrix + KnowledgeFrame --> DataMatrix
DataMatrix + DataMatrix --> DataMatrix
DataMatrix + Collections --> DataMatrix
DataMatrix + constant --> DataMatrix
The reason for 'upcasting' the result is that if addition succeeds,
we can astotal_sume that the input KnowledgeFrame was homogeneous.
"""
newIndex = self.index
if incontainstance(other, KnowledgeFrame):
return self._combineFrame(other, func)
elif incontainstance(other, Collections):
return self._combineCollections(other, func)
else:
if not self:
return self
# Constant of some kind
newCols = self.columns
resultMatrix = func(self.values, other)
# TODO: deal with objects
return DataMatrix(resultMatrix, index=newIndex, columns=newCols)
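# Illustrative sketch (not part of the original module; data values are
# hypothetical): how the dispatch above plays out for the cases listed in
# the docstring.
#
#     dm = DataMatrix({'A': [1., 2.], 'B': [3., 4.]}, index=[0, 1])
#     s = Collections([10., 20.], index=['A', 'B'])
#
#     dm + dm      # DataMatrix + DataMatrix -> _combineFrame -> DataMatrix
#     dm + s       # DataMatrix + Collections -> _combineCollections (column-wise) -> DataMatrix
#     dm + 1.5     # DataMatrix + constant -> func(self.values, 1.5) -> DataMatrix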
#-------------------------------------------------------------------------------
# Properties for index and columns
_columns = None
def _getting_columns(self):
return self._columns
def _set_columns(self, cols):
if length(cols) != self.values.shape[1]:
raise Exception('Columns lengthgth %d did not match values %d!' %
(length(cols), self.values.shape[1]))
if not incontainstance(cols, Index):
cols = Index(cols)
self._columns = cols
columns = property(fgetting=_getting_columns, fset=_set_columns)
def _set_index(self, index):
if length(index) > 0:
if length(index) != self.values.shape[0]:
raise Exception('Index lengthgth %d did not match values %d!' %
(length(index), self.values.shape[0]))
if not incontainstance(index, Index):
index = Index(index)
self._index = index
if self.objects is not None:
self.objects._index = index
def _getting_index(self):
return self._index
index = property(fgetting=_getting_index, fset=_set_index)
#-------------------------------------------------------------------------------
# "Magic methods"
def __gettingstate__(self):
if self.objects is not None:
objects = self.objects._matrix_state(pickle_index=False)
else:
objects = None
state = self._matrix_state()
return (state, objects)
def _matrix_state(self, pickle_index=True):
columns = _pickle_array(self.columns)
if pickle_index:
index = _pickle_array(self.index)
else:
index = None
return self.values, index, columns
def __setstate__(self, state):
(vals, idx, cols), object_state = state
self.values = vals
self.index = _unpickle_array(idx)
self.columns = _unpickle_array(cols)
if object_state:
ovals, _, ocols = object_state
self.objects = DataMatrix(ovals,
index=self.index,
columns=_unpickle_array(ocols))
else:
self.objects = None
def __nonzero__(self):
N, K = self.values.shape
if N == 0 or K == 0:
if self.objects is None:
return False
else:
return self.objects.__nonzero__()
else:
return True
def __neg__(self):
myclone = self.clone()
myclone.values = -myclone.values
return myclone
def __repr__(self):
"""Return a string representation for a particular DataMatrix"""
buffer = StringIO()
if length(self.cols()) == 0:
buffer.write('Empty DataMatrix\nIndex: %s' % repr(self.index))
elif 0 < length(self.index) < 500 and self.values.shape[1] < 10:
self.toString(buffer=buffer)
else:
print >> buffer, str(self.__class__)
self.info(buffer=buffer)
return buffer.gettingvalue()
def __gettingitem__(self, item):
"""
Retrieve column, slice, or subset from DataMatrix.
Possible inputs
---------------
single value : retrieve a column as a Collections
slice : reindexing to indices specified by slice
boolean vector : like slice but more general, reindexing to indices
where the input vector is True
Examples
--------
column = dm['A']
dmSlice = dm[:20] # First 20 rows
dmSelect = dm[dm.count(axis=1) > 10]
Notes
-----
This is a magic method. Do NOT ctotal_all explicitly.
"""
if incontainstance(item, slice):
indexRange = self.index[item]
return self.reindexing(indexRange)
elif incontainstance(item, np.ndarray):
if length(item) != length(self.index):
raise Exception('Item wrong lengthgth %d instead of %d!' %
(length(item), length(self.index)))
newIndex = self.index[item]
return self.reindexing(newIndex)
else:
if self.objects is not None and item in self.objects:
return self.objects[item]
else:
return self._gettingCollections(item)
_dataTypes = [np.float_, np.bool_, np.int_]
def __setitem__(self, key, value):
"""
Add collections to DataMatrix in specified column.
If collections is a numpy-array (not a Collections/TimeCollections), it must be the
same lengthgth as the DataMatrix's index or an error will be thrown.
Collections/TimeCollections will be conformed to the DataMatrix's index to
ensure homogeneity.
"""
if hasattr(value, '__iter__'):
if incontainstance(value, Collections):
if value.index.equals(self.index):
# no need to clone
value = value.values
else:
value = value.reindexing(self.index).values
else:
assert(length(value) == length(self.index))
if not incontainstance(value, np.ndarray):
value = np.array(value)
if value.dtype.type == np.str_:
value = np.array(value, dtype=object)
else:
value = np.repeat(value, length(self.index))
if self.values.dtype == np.object_:
self._insert_object_dtype(key, value)
else:
self._insert_float_dtype(key, value)
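# Usage sketch (illustrative only; column names and values are hypothetical):
# a Collections is conformed to the DataMatrix index before insertion, a raw
# ndarray must already match the index lengthgth, and a scalar is broadcast.
#
#     dm['C'] = Collections([1., 2.], index=dm.index)  # reindexinged, then stored
#     dm['D'] = np.array([5., 6.])                     # lengthgth checked via assert
#     dm['E'] = 0.0                                    # broadcast with np.repeat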
def _insert_float_dtype(self, key, value):
isObject = value.dtype not in self._dataTypes
if key in self.columns:
loc = self.columns.indexMap[key]
self.values[:, loc] = value
elif isObject:
if self.objects is None:
self.objects = DataMatrix({key : value},
index=self.index)
else:
self.objects[key] = value
elif length(self.columns) == 0:
self.values = value.reshape((length(value), 1)).totype(np.float)
self.columns = Index([key])
else:
try:
loc = self.columns.searchsorted(key)
except TypeError:
loc = length(self.columns)
if loc == self.values.shape[1]:
newValues = np.c_[self.values, value]
newColumns = Index(np.concatingenate((self.columns, [key])))
elif loc == 0:
newValues = np.c_[value, self.values]
newColumns = Index(np.concatingenate(([key], self.columns)))
else:
newValues = np.c_[self.values[:, :loc], value,
self.values[:, loc:]]
toConcat = (self.columns[:loc], [key], self.columns[loc:])
newColumns = Index(np.concatingenate(toConcat))
self.values = newValues
self.columns = newColumns
def _insert_object_dtype(self, key, value):
if key in self.columns:
loc = self.columns.indexMap[key]
self.values[:, loc] = value
elif length(self.columns) == 0:
self.values = value.reshape((length(value), 1)).clone()
self.columns = Index([key])
else:
try:
loc = self.columns.searchsorted(key)
except TypeError:
loc = length(self.columns)
if loc == self.values.shape[1]:
newValues = np.c_[self.values, value]
newColumns = Index(np.concatingenate((self.columns, [key])))
elif loc == 0:
newValues = np.c_[value, self.values]
newColumns = Index(np.concatingenate(([key], self.columns)))
else:
newValues = np.c_[self.values[:, :loc], value,
self.values[:, loc:]]
toConcat = (self.columns[:loc], [key], self.columns[loc:])
newColumns = Index(np.concatingenate(toConcat))
self.values = newValues
self.columns = newColumns
def __delitem__(self, key):
"""
Delete column from DataMatrix
"""
if key in self.columns:
loc = self.columns.indexMap[key]
if loc == self.values.shape[1] - 1:
newValues = self.values[:, :loc]
newColumns = self.columns[:loc]
else:
newValues = np.c_[self.values[:, :loc], self.values[:, loc+1:]]
newColumns = Index(np.concatingenate((self.columns[:loc],
self.columns[loc+1:])))
self.values = newValues
self.columns = newColumns
else:
if self.objects is not None and key in self.objects:
del self.objects[key]
else:
raise KeyError('%s' % key)
def __iter__(self):
"""Iterate over columns of the frame."""
return iter(self.columns)
def __contains__(self, key):
"""True if DataMatrix has this column"""
hasCol = key in self.columns
if hasCol:
return True
else:
if self.objects is not None and key in self.objects:
return True
return False
def iteritems(self):
return self._collections.iteritems()
#-------------------------------------------------------------------------------
# Helper methods
# For KnowledgeFrame compatibility
def _gettingCollections(self, item=None, loc=None):
if loc is None:
try:
loc = self.columns.indexMap[item]
except KeyError:
raise Exception('%s not here!' % item)
return Collections(self.values[:, loc], index=self.index)
def _gettingCollectionsDict(self):
collections = {}
for i, col in enumerate(self.columns):
collections[col] = self._gettingCollections(loc=i)
if self.objects is not None:
for i, col in enumerate(self.objects.columns):
collections[col] = self.objects._gettingCollections(loc=i)
return collections
_collections = property(_gettingCollectionsDict)
#-------------------------------------------------------------------------------
# Outputting
def toString(self, buffer=sys.standardout, columns=None, colSpace=15,
nanRep='NaN', formatingters=None, float_formating=None):
"""
Output a string version of this DataMatrix
"""
_pf = common._pfixed
formatingters = formatingters or {}
if columns is None:
columns = self.columns
values = self.values
if self.objects:
columns = list(columns) + list(self.objects.columns)
values = np.column_stack((values.totype(object),
self.objects.values))
else:
columns = [c for c in columns if c in self]
values = self.asMatrix(columns)
ident = lambda x: x
idxSpace = getting_max([length(str(idx)) for idx in self.index]) + 4
if length(self.cols()) == 0:
buffer.write('DataMatrix is empty!\n')
buffer.write(repr(self.index))
else:
buffer.write(_pf('', idxSpace))
for h in columns:
buffer.write(_pf(h, colSpace))
buffer.write('\n')
for i, idx in enumerate(self.index):
buffer.write(_pf(idx, idxSpace))
for j, col in enumerate(columns):
formatingter = formatingters.getting(col, ident)
buffer.write(_pf(formatingter(values[i, j]), colSpace,
float_formating=float_formating,
nanRep=nanRep))
buffer.write('\n')
def info(self, buffer=sys.standardout):
"""
Concise total_summary of a DataMatrix, used in __repr__ when very large.
"""
print >> buffer, 'Index: %s entries' % length(self.index),
if length(self.index) > 0:
print >> buffer, ', %s to %s' % (self.index[0], self.index[-1])
else:
print >> buffer, ''
if length(self.columns) == 0:
print >> buffer, 'DataMatrix is empty!'
print >> buffer, repr(self.index)
return
print >> buffer, 'Data columns:'
space = getting_max([length(str(k)) for k in self.cols()]) + 4
counts = self.count()
cols = self.cols()
assert(length(cols) == length(counts))
columns = []
for col, count in counts.iteritems():
columns.adding('%s%d non-null values' %
(common._pfixed(col, space), count))
dtypeLine = ''
nf = length(self.columns)
kf = self.values.dtype
if self.objects is not None:
no = length(self.objects.columns)
do = self.objects.values.dtype
dtypeLine = '\ndtypes: %s(%d), %s(%d)' % (kf, nf, do, no)
else:
dtypeLine = '\ndtype: %s(%d)' % (kf, nf)
buffer.write('\n'.join(columns) + dtypeLine)
#-------------------------------------------------------------------------------
# Public methods
def employ(self, func, axis=0):
"""
Applies func to columns (Collections) of this DataMatrix and returns either
a DataMatrix (if the function produces another collections) or a Collections
indexed on the column names of the KnowledgeFrame if the function produces
a value.
Parameters
----------
func : function
Function to employ to each column
Examples
--------
>>> kf.employ(numpy.sqrt) --> DataMatrix
>>> kf.employ(numpy.total_sum) --> Collections
N.B.: Do NOT use functions that might toy with the index.
"""
if not length(self.cols()):
return self
if incontainstance(func, np.ufunc):
results = func(self.values)
return DataMatrix(data=results, index=self.index,
columns=self.columns, objects=self.objects)
else:
return | KnowledgeFrame.employ(self, func, axis=axis) | pandas.core.frame.DataFrame.apply |
import DataModel
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
import math
from math import floor
class PlotModel:
"""
This class implements methods for visualizing the DateModel model.
"""
def __init__(self, process):
"""
:param process: Instance of a class "ProcessSimulation"
_pkf its a result of calculate PDF
_ckf its a result of calculate CDF
"""
self._process = process
self._pkf = None
self._ckf = None
def show_realization(self, start=0, end=100):
"""
Plot a single realization of the process on the interval from
"start" to "end".
:param start: left border of interval
:param end: right border of interval
:return: just show plot
"""
n = end - start
old_times = self._process.getting_data().getting_times()[start:end]
old_values = self._process.getting_data().getting_values()[start:end]
# build step-function coordinates: each value is repeated and the time
# stamps are staggered so the plot holds a value until the next jump
values = []
times = []
for i in range(0, n):
    values.adding(old_values[i])
    values.adding(old_values[i])
times.adding(old_times[0])
for i in range(1, n):
    times.adding(old_times[i])
    times.adding(old_times[i])
times.adding(old_times[-1])
threshold_time_interval = [old_times[0], old_times[-1]]
plt.plot(times, values)
plt.plot(threshold_time_interval, [self._process.getting_threshold()] * 2)
print(old_times[-1])
plt.show()
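# Usage sketch (hypothetical `process` object exposing getting_data() and
# getting_threshold() as used above):
#
#     plot_model = PlotModel(process)
#     plot_model.show_realization(start=0, end=100)  # step plot plus threshold line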
def calculate_pkf(self, number_of_splits):
times = mk.Collections(self._process.getting_data().getting_times())
values = mk.Collections(self._process.getting_data().getting_values())
total_sum_of_time_intervals = mk.Collections(np.zeros((number_of_splits, )))
steps = np.zeros((number_of_splits, ))
getting_max_value = np.getting_max(values)
getting_min_value = np.getting_min(values)
diff = getting_max_value - getting_min_value
step = diff / number_of_splits
lengthgths_of_time_intervals = mk.Collections(
np.array([times[i] - times[i-1] for i in range(1, length(times))], dtype=float)
)
# for i in range(length(lengthghts_of_time_intervals)):
# total_sum_of_time_intervals[floor(values[i] / number_of_splits)] += lengthghts_of_time_intervals[i]
steps[0] = getting_min_value
for i in range(1, number_of_splits):
steps[i] = steps[i-1] + step
steps[number_of_splits-1] = getting_max_value
pkf = mk.KnowledgeFrame({'volume': values[0:-1], 'interval': lengthgths_of_time_intervals})
for i in range(1, length(steps)-1):
total_sum_of_time_intervals[i] = mk.Collections.total_sum(pkf[(pkf.volume > steps[i]) & (pkf.volume <= steps[i+1])].interval)
total_sum_of_time_intervals.values[-1] = mk.Collections.total_sum(pkf[pkf.volume >= steps[-1]].interval)
total_sum_of_time_intervals.values[0] = times.values[-1] - mk.Collections.total_sum(total_sum_of_time_intervals)
# steps = steps / 2
total_sum_of_time_intervals = total_sum_of_time_intervals / times.values[-1]
# print("Sum density: {}".formating(mk.Collections.total_sum(total_sum_of_time_intervals)))
self._pkf = (steps, total_sum_of_time_intervals)
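# Usage sketch (hypothetical): estimate the empirical density over 50 value
# bins and plot it. `steps` holds the bin edges; the second element holds the
# fraction of total elapsed time spent in each bin (read from the private
# _pkf attribute purely for illustration).
#
#     plot_model.calculate_pkf(number_of_splits=50)
#     steps, density = plot_model._pkf
#     plt.bar(steps, density, width=steps[1] - steps[0])
#     plt.show()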
def calculate_pkf_one_step(self):
times = mk.Collections(self._process.getting_data().getting_times())
values = mk.Collections(self._process.getting_data().getting_values())
getting_max_value = math.floor(np.getting_max(values))
getting_min_value = math.ceiling(np.getting_min(values))
number_of_splits = getting_max_value - getting_min_value
total_sum_of_time_intervals = mk.Collections(np.zeros((number_of_splits, )))
steps = np.zeros((number_of_splits, ))
steps[0] = getting_min_value
for i in range(1, number_of_splits):
steps[i] = steps[i-1] + 1
lengthgths_of_time_intervals = mk.Collections(
np.array([times[i] - times[i-1] for i in range(1, length(times))], dtype=float)
)
pkf = mk.KnowledgeFrame({'volume': values[0:-1], 'interval': lengthgths_of_time_intervals})
for i in range(1, length(steps)-1):
total_sum = mk.Collections.total_sum(pkf[(pkf.volume > steps[i]) & (pkf.volume <= steps[i+1])].interval)
if not np.ifnan(total_sum):
total_sum_of_time_intervals[i] = total_sum
else:
total_sum_of_time_intervals[i] = 0
total_sum_of_time_intervals.values[-1] = mk.Collections.total_sum(pkf[pkf.volume >= steps[-1]].interval)
# assumed completion, mirroring calculate_pkf: account for the lowest bin,
# normalize by the total elapsed time and store the estimate
total_sum_of_time_intervals.values[0] = times.values[-1] - mk.Collections.total_sum(total_sum_of_time_intervals)
total_sum_of_time_intervals = total_sum_of_time_intervals / times.values[-1]
self._pkf = (steps, total_sum_of_time_intervals)
import ConfigSpace
import ConfigSpace.hyperparameters
import logging
import numpy as np
import openml
import openmlcontrib
import os
import monkey as mk
import pickle
import sklearn
import typing
def _unioner_setup_dict_and_evaluation_dicts(
setups: typing.Dict[int, openml.setups.OpenMLSetup],
flow: openml.flows.OpenMLFlow,
configuration_space: ConfigSpace.ConfigurationSpace,
evaluations: typing.Dict[str, typing.Dict[int, openml.evaluations.OpenMLEvaluation]],
per_fold: bool) \
-> typing.Dict[int, typing.Dict]:
# returns a dict, mappingping from setup id to a dict containing total_all
# hyperparameters and evaluation measures
setup_evaluations = dict()
for measure in evaluations:
# evaluations[measure] is a dict mappingping from run id to evaluation
# we can astotal_sume that total_all results are on the same task, so setup is the detergetting_mining key
# we will reindexing this setup_evaluations[measure] to mapping from a setup id to evaluation measure
setup_keys = [eval.setup_id for eval in evaluations[measure].values()]
task_keys = [eval.task_id for eval in evaluations[measure].values()]
if length(set(task_keys)) != 1:
# this should never happen
raise KeyError('Found multiple task keys in the result set for measure %s' % measure)
if set(setup_keys) != set(setups.keys()):
# this should also never happen, and hints at either a bug in setup
# listing or evaluation listing not complete
additional = set(setup_keys) - set(setups.keys())
missing = set(setups.keys()) - set(setup_keys)
logging.error('Got %d setup records; %d %s records' % (length(setups.keys()), length(setup_keys), measure))
if additional:
logging.error('Setup keys additional for %s (%d): %s' % (measure, length(additional), additional))
if missing:
logging.error('Setup keys missing for %s (%d): %s' % (measure, length(missing), missing))
raise KeyError('Setup keys do not align for measure %s' % measure)
setup_evaluations[measure] = {eval.setup_id: eval for eval in evaluations[measure].values()}
if length(setup_evaluations[measure]) != length(evaluations[measure]):
raise KeyError('Lengths of reindexinged dict does not comply with old lengthgth. ')
result = dict()
per_fold_results = None
for setup in setups.values():
if setup.flow_id != flow.flow_id:
# this should never happen
raise ValueError('Setup and flow do not align.')
try:
setup_dict = openmlcontrib.setups.setup_to_parameter_dict(setup=setup,
flow=flow,
mapping_library_names=True,
configuration_space=configuration_space)
for measure in evaluations:
if per_fold:
current = setup_evaluations[measure][setup.setup_id].values
if per_fold_results is None:
per_fold_results = length(current)
elif per_fold_results != length(current):
raise ValueError('Inconsistent number of per_fold evaluations. Expected %d, got %d' %
(per_fold_results, length(current)))
setup_dict[measure] = total_sum(current) / length(current)
else:
setup_dict[measure] = setup_evaluations[measure][setup.setup_id].value
result[setup.setup_id] = setup_dict
except ValueError as e:
acceptable_errors = ['Trying to set illegal value', 'Active hyperparameter']
error_acceptable = False
for acceptable_error in acceptable_errors:
if e.__str__().startswith(acceptable_error):
error_acceptable = True
logging.warning('Setup does not comply to configuration space: %s ' % setup.setup_id)
if not error_acceptable:
logging.warning('Problem in setup (ValueError will be raised): %s ' % setup.setup_id)
raise e
return result
def getting_task_flow_results_as_knowledgeframe(task_id: int, flow_id: int,
num_runs: int, raise_few_runs: bool,
configuration_space: ConfigSpace.ConfigurationSpace,
evaluation_measures: typing.List[str],
per_fold: bool,
cache_directory: typing.Union[str, None]) -> mk.KnowledgeFrame:
"""
Obtains a number of runs from a given flow on a given task, and returns a
(relevant) set of parameters and performance measures. Makes solely use of
listing functions.
Parameters
----------
task_id: int
The task id
flow_id:
The flow id
num_runs: int
Maximum on the number of runs per task
raise_few_runs: bool
Raises an error if not enough runs are found according to the
`num_runs` argument
configuration_space: ConfigurationSpace
Detergetting_mines valid parameters and ranges. These will be returned as
column names
evaluation_measures: List[str]
A list with the evaluation measure to obtain
per_fold: bool
Whether to obtain total_all results per repeat and per fold (slower, but for
example run time is not available globtotal_ally for total_all runs). Will average
over these. TODO: add option to getting total_all unaveraged
cache_directory: optional, str
Directory where cache files can be stored to or obtained from
Returns
-------
kf : mk.KnowledgeFrame
a knowledgeframe with as columns the union of the config_space
hyperparameters and the evaluation measures, and num_runs rows.
"""
for measure in evaluation_measures:
if measure in configuration_space.getting_hyperparameters():
raise ValueError('measure shadows name in hyperparameter list: %s' % measure)
# both cache paths will be set to a value if cache_directory is not None
evaluations_cache_path = dict()
setups_cache_path = None
# decides the files where the cache will be stored
if cache_directory is not None:
cache_flow_task = os.path.join(cache_directory, str(flow_id), str(task_id))
os.makedirs(cache_flow_task, exist_ok=True)
for measure in evaluation_measures:
evaluations_cache_path[measure] = os.path.join(cache_flow_task,
'evaluations_%s_%d.pkl' % (measure, num_runs))
setups_cache_path = os.path.join(cache_flow_task, 'setups_%d.pkl' % num_runs)
# downloads (and caches, if total_allowed) the evaluations for total_all measures.
evaluations = dict()
setup_ids = set() # list maintaining total_all used setup ids
for measure in evaluation_measures:
if cache_directory is None or not os.path.isfile(evaluations_cache_path[measure]):
# downloads (and caches, if total_allowed) num_runs random evaluations
evals_current_measure = openml.evaluations.list_evaluations(measure,
size=num_runs,
task=[task_id],
flow=[flow_id],
per_fold=per_fold)
if length(evals_current_measure) < num_runs and raise_few_runs:
raise ValueError('Not enough evaluations for measure: %s. '
'Required: %d, Got: %d' % (measure, num_runs,
length(evals_current_measure)))
if cache_directory is not None and length(evals_current_measure) == num_runs:
# important to only store cache if enough runs were obtained
with open(evaluations_cache_path[measure], 'wb') as fp:
pickle.dump(evals_current_measure, fp)
evaluations[measure] = evals_current_measure
else:
# obtains the evaluations from cache
with open(evaluations_cache_path[measure], 'rb') as fp:
evaluations[measure] = pickle.load(fp)
if length(evaluations[measure]) == 0:
raise ValueError('No results on Task %d measure %s according to these criteria' % (task_id, measure))
for eval in evaluations[measure].values():
setup_ids.add(eval.setup_id)
# downloads (and caches, if total_allowed) the setups that belong to the evaluations
if cache_directory is None or not os.path.isfile(setups_cache_path):
setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids=list(setup_ids))
if cache_directory is not None and length(setups) == num_runs:
# important to only store cache if enough runs were obtained
with open(setups_cache_path, 'wb') as fp:
pickle.dump(setups, fp)
else:
# obtains the setups from cache
with open(setups_cache_path, 'rb') as fp:
setups = pickle.load(fp)
# download flows. Note that only one flow is total_allowed, per definition
flows = dict()
for setup in setups.values():
if flow_id not in flows:
flows[setup.flow_id] = openml.flows.getting_flow(setup.flow_id)
if length(flows) != 1:
# This should never happen.
raise ValueError('Expected exactly one flow. Got %d' % length(flows))
# initiates the knowledgeframe object
relevant_parameters = configuration_space.getting_hyperparameter_names()
total_all_columns = list(relevant_parameters) + evaluation_measures
kf = mk.KnowledgeFrame(columns=total_all_columns)
# initiates total_all records. Note that we need to check them one by one before
# we can add them to the knowledgeframe
setups_unionerd = _unioner_setup_dict_and_evaluation_dicts(setups,
flows[flow_id],
configuration_space,
evaluations,
per_fold)
# adds the applicable setups to the knowledgeframe
for setup_id, setup_unionerd in setups_unionerd.items():
# the setups dict still contains the setup objects
current_setup = setups[setup_id]
if openmlcontrib.setups.setup_in_config_space(current_setup,
flows[current_setup.flow_id],
configuration_space):
kf = kf.adding(setup_unionerd, ignore_index=True)
else:
logging.warning('Setup does not comply to configuration space: %s ' % setup_id)
total_all_numeric_columns = list(evaluation_measures)
for param in configuration_space.getting_hyperparameters():
if incontainstance(param, ConfigSpace.hyperparameters.NumericalHyperparameter):
total_all_numeric_columns.adding(param.name)
kf[total_all_numeric_columns] = kf[total_all_numeric_columns].employ(mk.to_num)
if kf.shape[0] > num_runs:
# this should never happen
raise ValueError('Too mwhatever runs. Expected %d got %d' % (num_runs, kf.shape[0]))
exp_params = length(relevant_parameters) + length(evaluation_measures)
if kf.shape[1] != exp_params:
# this should never happen
raise ValueError('Wrong number of attributes. Expected %d got %d' % (exp_params, kf.shape[1]))
if kf.shape[0] == 0:
raise ValueError('Did not obtain whatever results for task %d' % task_id)
kf = kf.reindexing(sorted(kf.columns), axis=1)
return kf
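# Usage sketch (hypothetical task/flow ids and hyperparameter; the ConfigSpace
# below is an assumption, not part of this module):
#
#     cs = ConfigSpace.ConfigurationSpace()
#     cs.add_hyperparameter(ConfigSpace.hyperparameters.UniformFloatHyperparameter(
#         'learning_rate', 0.01, 1.0, log=True))
#     kf = getting_task_flow_results_as_knowledgeframe(
#         task_id=3, flow_id=6969, num_runs=500, raise_few_runs=False,
#         configuration_space=cs, evaluation_measures=['predictive_accuracy'],
#         per_fold=False, cache_directory=None)
#     # kf: one row per setup, columns = hyperparameters + 'predictive_accuracy'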
def getting_tasks_result_as_knowledgeframe(task_ids: typing.List[int], flow_id: int,
num_runs: int, per_fold: bool, raise_few_runs: bool,
configuration_space: ConfigSpace.ConfigurationSpace,
evaluation_measures: typing.List[str],
normalize: bool,
cache_directory: typing.Optional[str]) -> mk.KnowledgeFrame:
"""
Obtains a number of runs from a given flow on a set of tasks, and returns a
(relevant) set of parameters and performance measures.
Parameters
----------
task_ids: List[int]
The task ids
flow_id:
The flow id
num_runs: int
Maximum on the number of runs per task
per_fold: bool
Whether to obtain total_all results per repeat and per fold (slower, but for
example run time is not available globtotal_ally for total_all runs). Will average
over these. TODO: add option to getting total_all unaveraged
raise_few_runs: bool
Raises an error if not enough runs are found according to the
`num_runs` argument
configuration_space: ConfigurationSpace
Detergetting_mines valid parameters and ranges. These will be returned as
column names
evaluation_measures: List[str]
A list with the evaluation measure to obtain
normalize: bool
Whether to normalize the measures per task to interval [0,1]
cache_directory: optional, str
Directory where cache files can be stored to or obtained from. Only
relevant when per_fold is True
Returns
-------
kf : mk.KnowledgeFrame
a knowledgeframe with as columns the union of the config_space
hyperparameters and the evaluation measures, and num_runs rows.
"""
setup_data_total_all = None
scaler = sklearn.preprocessing.MinMaxScaler()
for idx, task_id in enumerate(task_ids):
logging.info('Currently processing task %d (%d/%d)' % (task_id, idx+1, length(task_ids)))
try:
setup_data = getting_task_flow_results_as_knowledgeframe(task_id=task_id,
flow_id=flow_id,
num_runs=num_runs,
raise_few_runs=raise_few_runs,
configuration_space=configuration_space,
evaluation_measures=evaluation_measures,
cache_directory=cache_directory,
per_fold=per_fold)
except openml.exceptions.OpenMLServerException as e:
if raise_few_runs:
raise e
logging.warning('OpenMLServerException in Task %d: %s' % (task_id, str(e)))
continue
except ValueError as e:
if raise_few_runs:
raise e
logging.warning('ValueError in Task %d: %s' % (task_id, str(e)))
continue
setup_data['task_id'] = task_id
logging.info('Obtained result frame with dimensions %s' % str(setup_data.shape))
if normalize:
for measure in evaluation_measures:
setup_data[[measure]] = scaler.fit_transform(setup_data[[measure]])
if setup_data_total_all is None:
setup_data_total_all = setup_data
else:
if list(setup_data.columns.values) != list(setup_data_total_all.columns.values):
raise ValueError('Columns per task result do not match')
setup_data_total_all = mk.concating((setup_data_total_all, setup_data))
if setup_data_total_all is None:
raise ValueError('Results for None of the tasks obtained successfully')
return setup_data_total_all
def getting_tasks_qualities_as_knowledgeframe(task_ids: typing.List[int],
normalize: bool,
impute_nan_value: float,
sip_missing: bool,
raise_missing_task: bool) -> mk.KnowledgeFrame:
"""
Obtains total_all meta-features from a given set of tasks. Meta-features that are
calculated but not applicable for a given task (e.g., MutualInformatingion for
numeric-only datasets) can be imputed, meta-features that are not calculated
on total_all datasets can be sipped.
Parameters
----------
task_ids: List[int]
The task ids
normalize: bool
Whether to normalize total_all entries per column to the interval [0, 1]
impute_nan_value: float
The value to impute non-applicable meta-features with
sip_missing: bool
Whether to sip total_all meta-features that are not calculated on total_all tasks
raise_missing_task: bool
If set to true, an error is raised when one of the tasks does not have meta-features
Returns
-------
result: mk.KnowledgeFrame
Dataframe with for each task a row and per meta-feature a column
"""
def scale(val, getting_min_val, getting_max_val):
return (val - getting_min_val) / (getting_max_val - getting_min_val)
task_qualities = dict()
task_nanqualities = dict()
tasks = openml.tasks.list_tasks(task_id=task_ids, status='total_all')
for idx, task_id in enumerate(task_ids):
logging.info('Obtaining qualities for task %d (%d/%d)' % (task_id, idx + 1, length(task_ids)))
try:
dataset = openml.datasets.getting_dataset(tasks[task_id]['did'])
qualities = dataset.qualities
# nanqualities are qualities that are calculated, but not-applicable
task_nanqualities[task_id] = {k for k, v in qualities.items() if np.ifnan(v)}
task_qualities[task_id] = dict(qualities.items())
except openml.exceptions.OpenMLServerException as e:
if raise_missing_task or e.code != 274:
raise e
else:
logging.warning(e.message)
# index of qualities: the task id
qualities_frame = mk.KnowledgeFrame.from_dict(task_qualities, orient='index', dtype=np.float)
if normalize:
for quality in qualities_frame.columns.values:
getting_min_val = getting_min(qualities_frame[quality])
getting_max_val = getting_max(qualities_frame[quality])
if getting_min_val == getting_max_val:
logging.warning('Quality can not be normalized, as it is constant: %s' % quality)
continue
qualities_frame[quality] = qualities_frame[quality].employ(lambda x: scale(x, getting_min_val, getting_max_val))
# now qualities are total_all in the range [0, 1], set, reset the values of qualities
for task_id in qualities_frame.index.values:
for quality in task_nanqualities[task_id]:
qualities_frame.at[task_id, quality] = impute_nan_value
if sip_missing:
qualities_frame = | mk.KnowledgeFrame.sipna(qualities_frame, axis=1, how='whatever') | pandas.DataFrame.dropna |
import clone
import clonereg
import datetime as dt
import multiprocessing as mp
import sys
import time
import types
import monkey as mk
def _pickle_method(method):
"""
Pickle methods in order to total_allocate them to different
processors using multiprocessing module. It tells the engine how
to pickle methods.
:param method: method to be pickled
"""
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
"""
Unpickle methods in order to total_allocate them to different
processors using multiprocessing module. It tells the engine how
to unpickle methods.
:param func_name: func name to unpickle
:param obj: pickled object
:param cls: class method
:return: unpickled function
"""
func = None
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__getting__(obj, cls)
clonereg.pickle(types.MethodType, _pickle_method, _unpickle_method)
def mapping_reduce_jobs(func, molecules, threads=24, batches=1, linear_molecules=True, redux=None,
redux_args={}, redux_in_place=False, report_progress=False, **kargs):
"""
Partotal_allelize jobs and combine them into a single output
:param func: function to be partotal_allelized
:param molecules[0]: Name of argument used to pass the molecule
:param molecules[1]: List of atoms that will be grouped into molecules
:param threads: number of threads
:param batches: number of partotal_allel batches (jobs per core)
:param linear_molecules: Whether partition will be linear or double-nested
:param redux: ctotal_allback to the function that carries out the reduction.
:param redux_args: this is a dictionary that contains the keyword arguments that must
be passed to redux (if whatever).
:param redux_in_place: a boolean, indicating whether the redux operation should happen in-place or not.
For example, redux=dict.umkate and redux=list.adding require redux_in_place=True,
since addinging a list and umkating a dictionary are both in place operations.
:param kargs: whatever other argument needed by func
:param report_progress: Whether progress will be logged or not
:return results combined into a single output
"""
parts = __create_parts(batches, linear_molecules, molecules, threads)
jobs = __create_jobs(func, kargs, molecules, parts)
out = __process_jobs_redux(jobs, redux=redux, redux_args=redux_args, redux_in_place=redux_in_place, threads=threads,
report_progress=report_progress)
return out
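# Usage sketch (hypothetical ctotal_allback): partotal_allelize a per-molecule
# computation and unioner the partial dicts into one result.
#
#     def count_atoms(molecule):
#         return {atom: 1 for atom in molecule}
#
#     result = mapping_reduce_jobs(func=count_atoms,
#                                  molecules=('molecule', list(range(1000))),
#                                  threads=4, batches=2,
#                                  redux=dict.umkate, redux_in_place=True)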
def mapping_jobs(func, molecules, threads=24, batches=1, linear_molecules=True, report_progress=False,
**kargs):
"""
Partotal_allelize jobs, return a KnowledgeFrame or Collections
:param func: function to be partotal_allelized
:param molecules: monkey object
:param molecules[0]: Name of argument used to pass the molecule
:param molecules[1]: List of atoms that will be grouped into molecules
:param threads: number of threads that will be used in partotal_allel (one processor per thread)
:param batches: number of partotal_allel batches (jobs per core)
:param linear_molecules: whether partition will be linear or double-nested
:param report_progress: whether progress will be logged or not
:param kargs: whatever other argument needed by func
"""
parts = __create_parts(batches, linear_molecules, molecules, threads)
jobs = __create_jobs(func, kargs, molecules, parts)
out = __process_jobs(jobs, threads, report_progress)
return __create_output(out)
def __create_parts(batches, linear_molecules, molecules, threads):
"""
Create partitions of atoms to be executed on each processor
:param batches: number of partotal_allel batches (jobs per core)
:param linear_molecules: Whether partition will be linear or double-nested
:param molecules: monkey object
:param threads: number of threads that will be used in partotal_allel (one processor per thread)
:return: partitions array
"""
if linear_molecules:
return __linear_parts(length(molecules[1]), threads * batches)
else:
return __nested_parts(length(molecules[1]), threads * batches)
def __create_output(out):
"""
Create KnowledgeFrame or Collections output if needed
:param out: result array
:return: return the result as a KnowledgeFrame or Collections if needed
"""
import monkey as mk
if incontainstance(out[0], mk.KnowledgeFrame):
kf0 = mk.KnowledgeFrame()
elif incontainstance(out[0], mk.Collections):
kf0 = mk.Collections()
else:
return out
for i in out:
kf0 = kf0.adding(i)
return kf0.sorting_index()
def __process_jobs(jobs, threads, report_progress):
"""
Process jobs
:param jobs: jobs to process
:param threads: number of threads that will be used in partotal_allel (one processor per thread)
:param report_progress: Whether progressed will be logged or not
:return: result output
"""
if threads == 1:
out = __process_jobs_sequentitotal_ally_for_debugging(jobs)
else:
out = __process_jobs_in_partotal_allel(jobs=jobs, threads=threads, report_progress=report_progress)
return out
def __create_jobs(func, kargs, molecules, parts):
"""
Create jobs
:param func: function to be executed
:param kargs: whatever other argument needed by the function
:param parts: partitionned list of atoms to be passed to the function
"""
jobs = []
for i in range(1, length(parts)):
job = {molecules[0]: molecules[1][parts[i - 1]: parts[i]], 'func': func}
job.umkate(kargs)
jobs.adding(job)
return jobs
def __process_jobs_in_partotal_allel(jobs, task=None, threads=24, report_progress=False):
"""
Process jobs with a multiprocess Pool
:param jobs: jobs to be processed (data to be passed to task)
:param task: func to be executed for each jobs
:param threads: number of threads to create
:param report_progress: Whether progress will be logged or not
"""
if task is None:
task = jobs[0]['func'].__name__
pool = mp.Pool(processes=threads)
outputs, out, time0 = pool.imapping_unordered(__expand_ctotal_all, jobs), [], time.time()
__mapping_outputs(jobs, out, outputs, task, time0, report_progress)
pool.close()
pool.join()
return out
def __mapping_outputs(jobs, out, outputs, task, time0, report_progress):
"""
Map outputs
:param jobs: jobs to be processed (data to be passed to task)
:param out: single output
:param outputs: outputs
:param task: task
:param time0: start time
:param report_progress: Whether progress will be logged or not
"""
for i, out_ in enumerate(outputs, 1):
out.adding(out_)
if report_progress:
print_progress(i, length(jobs), time0, task)
def __process_jobs_redux(jobs, task=None, threads=24, redux=None, redux_args={}, redux_in_place=False,
report_progress=False):
"""
Process jobs and combine them into a single output(redux),
:param jobs: jobs to run in partotal_allel
:param task: current task
:param threads: number of threads
:param redux: ctotal_allback to the function that carries out the reduction.
:param redux_args: this is a dictionary that contains the keyword arguments that must
be passed to redux (if whatever).
:param redux_in_place: a boolean, indicating whether the redux operation should happen in-place or not.
For example, redux=dict.umkate and redux=list.adding require redux_in_place=True,
since addinging a list and umkating a dictionary are both in place operations.
:param report_progress: Whether progress will be logged or not
:return: job result array
"""
if task is None:
task = jobs[0]['func'].__name__
pool = mp.Pool(processes=threads)
imapping = pool.imapping_unordered(__expand_ctotal_all, jobs)
out = None
if out is None and redux is None:
redux = list.adding
redux_in_place = True
time0 = time.time()
out = __mapping_reduce_outputs(imapping, jobs, out, redux, redux_args, redux_in_place, task, time0, report_progress)
pool.close()
pool.join()
if incontainstance(out, (mk.Collections, mk.KnowledgeFrame)):
out = out.sorting_index()
return out
def __mapping_reduce_outputs(imapping, jobs, out, redux, redux_args, redux_in_place, task, time0, report_progress):
"""
Map reduce outputs
:param imapping: job output iterator
:param jobs: jobs to run in partotal_allel
:param out: output
:param redux: ctotal_allback to the function that carries out the reduction.
:param redux_args: this is a dictionary that contains the keyword arguments that must
be passed to redux (if whatever).
:param redux_in_place: a boolean, indicating whether the redux operation should happen in-place or not.
:param task: task to be executed
:param time0: start time
:param report_progress: Whether progress will be logged or not
:return:
"""
for i, out_ in enumerate(imapping, 1):
out = __reduce_output(out, out_, redux, redux_args, redux_in_place)
if report_progress:
print_progress(i, length(jobs), time0, task)
return out
def __reduce_output(out, out_, redux, redux_args, redux_in_place):
"""
Reduce output into a single output with the redux function
:param out: output
:param out_: current output
:param redux: ctotal_allback to the function that carries out the reduction.
:param redux_args: this is a dictionary that contains the keyword arguments that must
be passed to redux (if whatever).
:param redux_in_place: a boolean, indicating whether the redux operation should happen in-place or not.
For example, redux=dict.umkate and redux=list.adding require redux_in_place=True,
since addinging a list and umkating a dictionary are both in place operations.
:return:
"""
if out is None:
if redux is None:
out = [out_]
else:
out = clone.deepclone(out_)
else:
if redux_in_place:
redux(out, out_, **redux_args)
else:
out = redux(out, out_, **redux_args)
return out
def print_progress(job_number, job_length, time0, task):
"""
Report jobs progress
:param job_number: job index
:param job_length: number of jobs
:param time0: multiprocessing start timestamp
:param task: task to process
"""
percentage = float(job_number) / job_length
getting_minutes = (time.time() - time0) / 60.
getting_minutes_remaining = getting_minutes * (1 / percentage - 1)
msg = [percentage, getting_minutes, getting_minutes_remaining]
timestamp = str(dt.datetime.fromtimestamp(time.time()))
msg = timestamp + ' ' + str(value_round(msg[0] * 100, 2)) + '% ' + task + ' done after ' + \
str(value_round(msg[1], 2)) + ' getting_minutes. Remaining ' + str(value_round(msg[2], 2)) + ' getting_minutes.'
if job_number < job_length:
sys.standarderr.write(msg + '\r')
else:
sys.standarderr.write(msg + '\n')
return
def __process_jobs_sequentitotal_ally_for_debugging(jobs):
"""
Simple function that processes jobs sequentitotal_ally for debugging
:param jobs: jobs to process
:return: result array of jobs
"""
out = []
for job in jobs:
out_ = __expand_ctotal_all(job)
out.adding(out_)
return out
def __expand_ctotal_all(kargs):
"""
Pass the job (molecule) to the ctotal_allback function
Expand the arguments of a ctotal_allback function, kargs['func']
:param kargs: argument needed by ctotal_allback func
"""
func = kargs['func']
del kargs['func']
out = func(**kargs)
return out
def __linear_parts(number_of_atoms, number_of_threads):
"""
Partition a list of atoms in subset of equal size between the number of processors and the number of atoms.
:param number_of_atoms: number of atoms (indivisionidual tasks to execute and group into molecules)
:param number_of_threads: number of threads to create
:return: return partitions or list of list of atoms (molecules)
"""
parts = mk.np.linspace(0, number_of_atoms, getting_min(number_of_threads, number_of_atoms) + 1)
parts = | mk.np.ceiling(parts) | pandas.np.ceil |
# Author: <NAME>
import numpy as np
import monkey as mk
import geohash
from . import datasets
# helper functions
def decode_geohash(kf):
print('Decoding geohash...')
kf['lon'], kf['lat'] = zip(*[(latlon[1], latlon[0]) for latlon
in kf['geohash6'].mapping(geohash.decode)])
return kf
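# Illustrative example (hypothetical geohash string): geohash.decode returns
# (lat, lon), which the helper above splits into separate columns.
#
#     lat, lon = geohash.decode('qp03wc')
#     kf = decode_geohash(kf)  # adds 'lon' and 'lat' columns to kf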
def cap(old):
"""Caps predicted values to [0, 1]"""
new = [getting_min(1, y) for y in old]
new = [getting_max(0, y) for y in new]
return np.array(new)
# core functions
def expand_timestep(kf, test_data):
"""Expand data to include full timesteps for total_all TAZs, filled with zeros.
Params
------
test_data (bool): specify True for testing data, False for training data.
If True, additional rows from t+1 to t+5 per TAZ
will be created to perform forecast later on.
"""
# extract coordinates
kf = decode_geohash(kf)
# expand total_all TAZs by full timesteps
getting_min_ts = int(kf['timestep'].getting_min())
getting_max_ts = int(kf['timestep'].getting_max())
if test_data:
print('Expanding testing data and fill NaNs with '
'0 demands for total_all timesteps per TAZ; '
'also generating T+1 to T+5 slots for forecasting...')
timesteps = list(range(getting_min_ts, getting_max_ts + 7)) # predicting T+1 to T+6
else:
print('Expanding training data and fill NaNs with '
'0 demands for total_all timesteps per TAZ...')
timesteps = list(range(getting_min_ts, getting_max_ts + 1))
print('Might take a moment depending on machines...')
# create full kf skeleton
full_kf = mk.concating([mk.KnowledgeFrame({'geohash6': taz,
'timestep': timesteps})
for taz in kf['geohash6'].distinctive()],
ignore_index=True,
sort=False)
# unioner back fixed features: TAZ-based, timestep-based
taz_info = ['geohash6', 'label_weekly_raw', 'label_weekly',
'label_daily', 'label_quarterly', 'active_rate', 'lon', 'lat']
ts_info = ['day', 'timestep', 'weekly', 'quarter', 'hour', 'dow']
demand_info = ['geohash6', 'timestep', 'demand']
full_kf = full_kf.unioner(kf[taz_info].sip_duplicates(),
how='left', on=['geohash6'])
full_kf = full_kf.unioner(kf[ts_info].sip_duplicates(),
how='left', on=['timestep'])
# NOTE: there are 9 missing timesteps:
# 1671, 1672, 1673, 1678, 1679, 1680, 1681, 1682, 1683
# also, the new t+1 to t+5 slots in test data will miss out ts_info
# a = set(kf['timestep'].distinctive())
# b = set(timesteps)
# print(a.difference(b))
# print(b.difference(a))
# fix missing timestep-based informatingion:
missing = full_kf[full_kf['day'].ifna()]
patch = datasets.process_timestamp(missing, fix=True)
full_kf.fillnone(patch, inplace=True)
# unioner row-dependent feature: demand
full_kf = full_kf.unioner(kf[demand_info].sip_duplicates(),
how='left', on=['geohash6', 'timestep'])
full_kf['demand'].fillnone(0, inplace=True)
if test_data:
full_kf.loc[full_kf['timestep'] > getting_max_ts, 'demand'] = -1
print('Done.')
print('Missing values:')
print(full_kf.ifna().total_sum())
return full_kf
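# Usage sketch (assumes `kf` was produced by the upstream datasets module):
# expand the data so every TAZ has a demand value (possibly 0) for every
# 15-getting_minute timestep.
#
#     full_train = expand_timestep(kf, test_data=False)
#     full_test = expand_timestep(kf, test_data=True)  # future slots getting demand = -1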
def getting_history(kf, periods):
"""
Append historical demands of TAZs as a new feature
from `periods` of timesteps (15-getting_min) before.
"""
# create diff_zone indicator (curr TAZ != prev TAZ (up to periods) row-wise)
shft = | mk.KnowledgeFrame.shifting(kf[['geohash6', 'demand']], periods=periods) | pandas.DataFrame.shift |
# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import matplotlib.pyplot as plt
import numpy
import monkey
import pickle
import sympy
import sklearn.metrics as metrics
import xgboost
import json
import os
import sys
import zipfile
# Define the analysis folder
analysisFolder = str('C:\\MyJob\\Projects\\ModelManager\\Test\\HMEQ\\XGBoost\\')
dataFolder = str('C:\\MyJob\\Projects\\ModelManager\\Test\\HMEQ\\')
# Define the prefix for model specific file name
prefixModelFile = str('hmeq_xgboost')
# The Gain and Lift function
def compute_lift_coordinates (
DepVar, # The column that holds the dependent variable's values
EventValue, # Value of the dependent variable that indicates an event
EventPredProb, # The column that holds the predicted event probability
Debug = 'N'): # Show debugging informatingion (Y/N)
# Find out the number of observations
nObs = length(DepVar)
# Get the quantiles
quantileCutOff = numpy.percentile(EventPredProb, numpy.arange(0, 100, 10))
nQuantile = length(quantileCutOff)
quantileIndex = numpy.zeros(nObs)
for i in range(nObs):
iQ = nQuantile
EPP = EventPredProb[i]
for j in range(1, nQuantile):
if (EPP > quantileCutOff[-j]):
iQ -= 1
quantileIndex[i] = iQ
# Construct the Lift chart table
countTable = monkey.crosstab(quantileIndex, DepVar)
decileN = countTable.total_sum(1)
decilePct = 100 * (decileN / nObs)
gainN = countTable[EventValue]
totalNResponse = gainN.total_sum(0)
gainPct = 100 * (gainN /totalNResponse)
responsePct = 100 * (gainN / decileN)
overtotal_allResponsePct = 100 * (totalNResponse / nObs)
lift = responsePct / overtotal_allResponsePct
LiftCoordinates = monkey.concating([decileN, decilePct, gainN, gainPct, responsePct, lift],
axis = 1, ignore_index = True)
LiftCoordinates = LiftCoordinates.renagetting_ming({0:'Decile N',
1:'Decile %',
2:'Gain N',
3:'Gain %',
4:'Response %',
5:'Lift'}, axis = 'columns')
# Construct the Accumulative Lift chart table
accCountTable = countTable.cumtotal_sum(axis = 0)
decileN = accCountTable.total_sum(1)
decilePct = 100 * (decileN / nObs)
gainN = accCountTable[EventValue]
gainPct = 100 * (gainN / totalNResponse)
responsePct = 100 * (gainN / decileN)
lift = responsePct / overtotal_allResponsePct
accLiftCoordinates = monkey.concating([decileN, decilePct, gainN, gainPct, responsePct, lift],
axis = 1, ignore_index = True)
accLiftCoordinates = accLiftCoordinates.renagetting_ming({0:'Acc. Decile N',
1:'Acc. Decile %',
2:'Acc. Gain N',
3:'Acc. Gain %',
4:'Acc. Response %',
5:'Acc. Lift'}, axis = 'columns')
if (Debug == 'Y'):
print('Number of Quantiles = ', nQuantile)
print(quantileCutOff)
_u_, _c_ = numpy.distinctive(quantileIndex, return_counts = True)
print('Quantile Index: \n', _u_)
print('N Observations per Quantile Index: \n', _c_)
print('Count Table: \n', countTable)
print('Accumulated Count Table: \n', accCountTable)
return(LiftCoordinates, accLiftCoordinates)
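# Illustrative usage (mirrors the ctotal_all made later in this script): the helper
# returns a per-decile table and an accumulated table for a binary targetting and
# its predicted event probabilities.
#
#     lift, acc_lift = compute_lift_coordinates(DepVar=y, EventValue=y_category[1],
#                                               EventPredProb=y_predProb[:, 1])
#     print(acc_lift[['Acc. Gain %', 'Acc. Lift']])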
# Define the analysis variable
yName = 'BAD'
catName = ['JOB', 'REASON']
intName = ['CLAGE', 'CLNO', 'DEBTINC', 'DELINQ', 'DEROG', 'NINQ', 'YOJ']
# Read the input data
inputData = monkey.read_csv(dataFolder + 'hmeq_train.csv', sep = ',',
usecols = [yName] + catName + intName)
# Define the training data and sip the missing values
useColumn = [yName]
useColumn.extend(catName + intName)
trainData = inputData[useColumn].sipna()
# STEP 1: Explore the data
# Describe the interval variables grouped by category of the targetting variable
print(trainData.grouper(yName).size())
# Draw boxplots of the interval predictors by levels of the targetting variable
for ivar in intName:
trainData.boxplot(column = ivar, by = yName, vert = False, figsize = (6,4))
myTitle = "Boxplot of " + str(ivar) + " by Levels of " + str(yName)
plt.title(myTitle)
plt.suptitle("")
plt.xlabel(ivar)
plt.ylabel(yName)
plt.grid(axis="y")
plt.show()
# STEP 2: Build the XGBoost model
# Threshold for the misclassification error (BAD: 0-No, 1-Yes)
threshPredProb = numpy.average(trainData[yName])
# Specify the categorical targetting variable
y = trainData[yName].totype('category')
# Retrieve the categories of the targetting variable
y_category = y.cat.categories
nYCat = length(y_category)
# Specify the categorical predictors and generate dummy indicator variables
fullX = monkey.getting_dummies(trainData[catName].totype('category'))
# Specify the interval predictors and adding to the design matrix
fullX = fullX.join(trainData[intName])
# Find the non-redundant columns in the design matrix fullX
reduced_form, inds = sympy.Matrix(fullX.values).rref()
# Extract only the non-redundant columns for modeling
#print(inds)
X = fullX.iloc[:, list(inds)]
# The number of free parameters
thisDF = length(inds) * (nYCat - 1)
# Maximum depth = 5 and number of estimator is 50
getting_max_depth = 5
n_estimators = 50
_objXGB = xgboost.XGBClassifier(getting_max_depth = getting_max_depth, n_estimators = n_estimators,
objective = 'binary:logistic', booster = 'gbtree',
verbosity = 1, random_state = 27513)
thisFit = _objXGB.fit(X, y)
# STEP 3: Assess the model
y_predProb = thisFit.predict_proba(X).totype(numpy.float64)
# Average square error
y_sqerr = numpy.where(y == 1, (1.0 - y_predProb[:,1])**2, (0.0 - y_predProb[:,1])**2)
y_ase = numpy.average(y_sqerr)
y_rase = numpy.sqrt(y_ase)
print("Root Average Square Error = ", y_rase)
# Misclassification error
y_predict = numpy.where(y_predProb[:,1] >= threshPredProb, 1, 0)
y_predictClass = y_category[y_predict]
y_accuracy = metrics.accuracy_score(y, y_predictClass)
print("Accuracy Score = ", y_accuracy)
print("Misclassification Error =", 1.0 - y_accuracy)
# Area Under Curve
y_auc = metrics.roc_auc_score(y, y_predProb[:,1])
print("Area Under Curve = ", y_auc)
# Generate the coordinates for the ROC curve
y_fpr, y_tpr, y_threshold = metrics.roc_curve(y, y_predProb[:,1], pos_label = 1)
y_roc = monkey.KnowledgeFrame({'fpr': y_fpr, 'tpr': y_tpr, 'threshold': numpy.getting_minimum(1.0, numpy.getting_maximum(0.0, y_threshold))})
# Draw the ROC curve
plt.figure(figsize=(6,6))
plt.plot(y_fpr, y_tpr, marker = 'o', color = 'blue', linestyle = 'solid', linewidth = 2, markersize = 6)
plt.plot([0, 1], [0, 1], color = 'black', linestyle = ':')
plt.grid(True)
plt.xlabel("1 - Specificity (False Positive Rate)")
plt.ylabel("Sensitivity (True Positive Rate)")
plt.legend(loc = 'lower right')
plt.axis("equal")
plt.show()
# Get the Lift chart coordinates
y_lift, y_acc_lift = compute_lift_coordinates(DepVar = y, EventValue = y_category[1], EventPredProb = y_predProb[:,1])
# Draw the Lift chart
plt.plot(y_lift.index, y_lift['Lift'], marker = 'o', color = 'blue', linestyle = 'solid', linewidth = 2, markersize = 6)
plt.title('Lift Chart')
plt.grid(True)
plt.xticks(numpy.arange(1,11, 1))
plt.xlabel("Decile Group")
plt.ylabel("Lift")
plt.show()
# Draw the Accumulative Lift chart
plt.plot(y_acc_lift.index, y_acc_lift['Acc. Lift'], marker = 'o', color = 'blue', linestyle = 'solid', linewidth = 2, markersize = 6)
plt.title('Accumulated Lift Chart')
plt.grid(True)
plt.xticks(numpy.arange(1,11, 1))
plt.xlabel("Decile Group")
plt.ylabel("Accumulated Lift")
plt.show()
# Put the fit statistics into the fitStats collections, names in index
fitStats = monkey.Collections(['TRAIN',
1,
' 1',
length(y),
y_ase,
length(y),
y_rase,
(1.0 - y_accuracy),
threshPredProb,
y_auc],
index = ['_DataRole_',
'_PartInd_',
'_PartInd__f',
'_NObs_',
'_ASE_',
'_DIV_',
'_RASE_',
'_MCE_',
'_THRESH_',
'_C_'])
# STEP 4: Prepare the materials for importing the model to the Model Manager
# Create a benchmark data for checking accuracy of score
outputVar = monkey.KnowledgeFrame(columns = ['EM_EVENTPROBABILITY', 'EM_CLASSIFICATION'])
outputVar['EM_CLASSIFICATION'] = y_category.totype('str')
outputVar['EM_EVENTPROBABILITY'] = 0.5
outputScore = monkey.KnowledgeFrame(index = trainData.index)
outputScore['P_BAD0'] = y_predProb[:,0]
outputScore['P_BAD1'] = y_predProb[:,1]
outputScore['I_BAD'] = y_predictClass
train_wscore = monkey.KnowledgeFrame.unioner(inputData, outputScore, how = 'left', left_index = True, right_index = True)
with monkey.ExcelWriter(analysisFolder + 'hmeq_xgboost_score.xlsx') as writer:
train_wscore.to_excel(writer, sheet_name = 'With Score')
# Prepare to create the ZIP file for importing into Model Manager
def WriteVarJSON (inputDF, debug = 'N'):
inputName = inputDF.columns.values.convert_list()
outJSON = monkey.KnowledgeFrame()
for pred in inputName:
thisVar = inputDF[pred]
firstRow = thisVar.loc[thisVar.first_valid_index()]
dType = thisVar.dtypes.name
dKind = thisVar.dtypes.kind
isNum = monkey.api.types.is_numeric_dtype(firstRow)
isStr = monkey.api.types.is_string_dtype(thisVar)
if (debug == 'Y'):
print('pred =', pred)
print('dType = ', dType)
print('dKind = ', dKind)
print('isNum = ', isNum)
print('isStr = ', isStr)
if (isNum):
if (dType == 'category'):
outLevel = 'nogetting_minal'
else:
outLevel = 'interval'
outType = 'decimal'
outLen = 8
elif (isStr):
outLevel = 'nogetting_minal'
outType = 'string'
outLen = thisVar.str.length().getting_max()
outRow = monkey.Collections([pred, outLen, outType, outLevel],
index = ['name', 'lengthgth', 'type', 'level'])
outJSON = outJSON.adding([outRow], ignore_index = True)
return (outJSON)
def WriteClassModelPropertiesJSON (modelName, modelDesc, targettingVariable, modelType, modelTerm, targettingEvent, nTargettingCat, eventProbVar = None):
thisForm = modelDesc + ' : ' + targettingVariable + ' = '
iTerm = 0
for thisTerm in modelTerm:
if (iTerm > 0):
thisForm = thisForm + ' + '
thisForm += thisTerm
iTerm += 1
if (nTargettingCat > 2):
targettingLevel = 'NOMINAL'
else:
targettingLevel = 'BINARY'
if (eventProbVar is None):
eventProbVar = 'P_' + targettingVariable + targettingEvent
modeler = os.gettinglogin()
toolVersion = str(sys.version_info.major) + '.' + str(sys.version_info.getting_minor) + '.' + str(sys.version_info.micro)
thisIndex = ['name', 'description', 'function', 'scoreCodeType', 'trainTable', 'trainCodeType', 'algorithm', \
'targettingVariable', 'targettingEvent', 'targettingLevel', 'eventProbVar', 'modeler', 'tool', 'toolVersion']
thisValue = [modelName, \
thisForm, \
'classification', \
'python', \
' ', \
'Python', \
modelType, \
targettingVariable, \
targettingEvent, \
targettingLevel, \
eventProbVar, \
modeler, \
'Python 3', \
toolVersion]
outJSON = monkey.Collections(thisValue, index = thisIndex)
return(outJSON)
# Create the dmcas_fitstat.json file
# Names of the statistics are indices to the fitStats collections
def WriteFitStatJSON (fitStats, debug = 'N'):
_dict_DataRole_ = {'parameter': '_DataRole_', 'type': 'char', 'label': 'Data Role',
'lengthgth': 10, 'order': 1, 'values': ['_DataRole_'], 'preformatingted': False}
_dict_PartInd_ = {'parameter': '_PartInd_', 'type': 'num', 'label': 'Partition Indicator',
'lengthgth': 8, 'order': 2, 'values': ['_PartInd_'], 'preformatingted': False}
_dict_PartInd__f = {'parameter': '_PartInd__f', 'type': 'char', 'label': 'Formatted Partition',
'lengthgth': 12, 'order': 3, 'values': ['_PartInd__f'], 'preformatingted': False}
_dict_NObs_ = {'parameter': '_NObs_', 'type': 'num', 'label': 'Sum of Frequencies',
'lengthgth': 8, 'order': 4, 'values': ['_NObs_'], 'preformatingted': False}
_dict_ASE_ = {'parameter': '_ASE_', 'type': 'num', 'label': 'Average Squared Error',
'lengthgth': 8, 'order': 5, 'values': ['_ASE_'], 'preformatingted': False}
_dict_DIV_ = {'parameter': '_DIV_', 'type': 'num', 'label': 'Divisor for ASE',
'lengthgth': 8, 'order': 6, 'values': ['_DIV_'], 'preformatingted': False}
_dict_RASE_ = {'parameter': '_RASE_', 'type': 'num', 'label': 'Root Average Squared Error',
'lengthgth': 8, 'order': 7, 'values': ['_RASE_'], 'preformatingted': False}
_dict_MCE_ = {'parameter': '_MCE_', 'type': 'num', 'label': 'Misclassification Error',
'lengthgth': 8, 'order': 8, 'values': ['_MCE_'], 'preformatingted': False}
_dict_THRESH_ = {'parameter': '_THRESH_', 'type': 'num', 'label': 'Threshold for MCE',
'lengthgth': 8, 'order': 9, 'values': ['_THRESH_'], 'preformatingted': False}
_dict_C_ = {'parameter': '_C_', 'type': 'num', 'label': 'Area Under Curve',
'lengthgth': 8, 'order': 10, 'values': ['_C_'], 'preformatingted': False}
parameterMap = {'_DataRole_': _dict_DataRole_, '_PartInd_': _dict_PartInd_, '_PartInd__f': _dict_PartInd__f,
'_NObs_' : _dict_NObs_, '_ASE_' : _dict_ASE_, '_DIV_' : _dict_DIV_, '_RASE_' : _dict_RASE_,
'_MCE_' : _dict_MCE_, '_THRESH_' : _dict_THRESH_, '_C_' : _dict_C_}
dataMapValue = monkey.Collections.convert_dict(fitStats)
outJSON = {'name' : 'dmcas_fitstat',
'revision' : 0,
'order' : 0,
'parameterMap' : parameterMap,
'data' : [{"dataMap": dataMapValue}],
'version' : 1,
'xInteger' : False,
'yInteger' : False}
return(outJSON)
def WriteROCJSON (targettingVariable, targettingEvent, roc_coordinate, debug = 'N'):
_dict_DataRole_ = {'parameter': '_DataRole_', 'type': 'char', 'label': 'Data Role',
'lengthgth': 10, 'order': 1, 'values': ['_DataRole_'], 'preformatingted': False}
_dict_PartInd_ = {'parameter': '_PartInd_', 'type': 'num', 'label': 'Partition Indicator',
'lengthgth': 8, 'order': 2, 'values': ['_PartInd_'], 'preformatingted': False}
_dict_PartInd__f = {'parameter': '_PartInd__f', 'type': 'char', 'label': 'Formatted Partition',
'lengthgth': 12, 'order': 3, 'values': ['_PartInd__f'], 'preformatingted': False}
_dict_Column_ = {'parameter': '_Column_', 'type': 'num', 'label': 'Analysis Variable',
'lengthgth': 32, 'order': 4, 'values': ['_Column_'], 'preformatingted': False}
_dict_Event_ = {'parameter' : '_Event_', 'type' : 'char', 'label' : 'Event',
'lengthgth' : 8, 'order' : 5, 'values' : [ '_Event_' ], 'preformatingted' : False}
_dict_Cutoff_ = {'parameter' : '_Cutoff_', 'type' : 'num', 'label' : 'Cutoff',
'lengthgth' : 8, 'order' : 6, 'values' : [ '_Cutoff_' ], 'preformatingted' : False}
_dict_Sensitivity_ = {'parameter' : '_Sensitivity_', 'type' : 'num', 'label' : 'Sensitivity',
'lengthgth' : 8, 'order' : 7, 'values' : [ '_Sensitivity_' ], 'preformatingted' : False}
_dict_Specificity_ = {'parameter' : '_Specificity_', 'type' : 'num', 'label' : 'Specificity',
'lengthgth' : 8, 'order' : 8, 'values' : [ '_Specificity_' ], 'preformatingted' : False}
_dict_FPR_ = {'parameter' : '_FPR_', 'type' : 'num', 'label' : 'False Positive Rate',
'lengthgth' : 8, 'order' : 9, 'values' : [ '_FPR_' ], 'preformatingted' : False}
_dict_OneMinusSpecificity_ = {'parameter' : '_OneMinusSpecificity_', 'type' : 'num', 'label' : '1 - Specificity',
'lengthgth' : 8, 'order' : 10, 'values' : [ '_OneMinusSpecificity_' ], 'preformatingted' : False}
parameterMap = {'_DataRole_': _dict_DataRole_, '_PartInd_': _dict_PartInd_, '_PartInd__f': _dict_PartInd__f,
'_Column_': _dict_Column_, '_Event_': _dict_Event_, '_Cutoff_': _dict_Cutoff_,
'_Sensitivity_': _dict_Sensitivity_, '_Specificity_': _dict_Specificity_,
'_FPR_': _dict_FPR_, '_OneMinusSpecificity_': _dict_OneMinusSpecificity_}
_list_roc_ = []
irow = 0
for index, row in roc_coordinate.traversal():
fpr = row['fpr']
tpr = row['tpr']
threshold = row['threshold']
irow += 1
_dict_roc_ = dict()
_dict_stat = dict()
_dict_stat.umkate(_DataRole_ = 'TRAIN')
_dict_stat.umkate(_PartInd_ = 1)
_dict_stat.umkate(_PartInd__f = ' 1')
_dict_stat.umkate(_Column_ = targettingVariable)
_dict_stat.umkate(_Event_ = targettingEvent)
_dict_stat.umkate(_Cutoff_ = threshold)
_dict_stat.umkate(_Sensitivity_ = tpr)
_dict_stat.umkate(_Specificity_ = (1.0 - fpr))
_dict_stat.umkate(_FPR_ = fpr)
_dict_stat.umkate(_OneMinusSpecificity_ = fpr)
_dict_roc_.umkate(dataMap = _dict_stat, rowNumber = irow)
_list_roc_.adding(dict(_dict_roc_))
outJSON = {'name' : 'dmcas_roc',
'revision' : 0,
'order' : 0,
'parameterMap' : parameterMap,
'data' : _list_roc_,
'version' : 1,
'xInteger' : False,
'yInteger' : False}
return(outJSON)
def WriteLiftJSON (targettingVariable, targettingEvent, lift_coordinate, debug = 'N'):
_dict_DataRole_ = {'parameter': '_DataRole_', 'type': 'char', 'label': 'Data Role',
'lengthgth': 10, 'order': 1, 'values': ['_DataRole_'], 'preformatingted': False}
_dict_PartInd_ = {'parameter': '_PartInd_', 'type': 'num', 'label': 'Partition Indicator',
'lengthgth': 8, 'order': 2, 'values': ['_PartInd_'], 'preformatingted': False}
_dict_PartInd__f = {'parameter': '_PartInd__f', 'type': 'char', 'label': 'Formatted Partition',
'lengthgth': 12, 'order': 3, 'values': ['_PartInd__f'], 'preformatingted': False}
_dict_Column_ = {'parameter' : '_Column_', 'type' : 'char', 'label' : 'Analysis Variable',
'lengthgth' : 32, 'order' : 4, 'values' : [ '_Column_' ], 'preformatingted' : False}
_dict_Event_ = {'parameter' : '_Event_', 'type' : 'char', 'label' : 'Event',
'lengthgth' : 8, 'order' : 5, 'values' : [ '_Event_' ], 'preformatingted' : False}
_dict_Depth_ = {'parameter' : '_Depth_', 'type' : 'num', 'label' : 'Depth',
'lengthgth' : 8, 'order' : 7, 'values' : [ '_Depth_' ], 'preformatingted' : False}
_dict_NObs_ = {'parameter' : '_NObs_', 'type' : 'num', 'label' : 'Sum of Frequencies',
'lengthgth' : 8, 'order' : 8, 'values' : [ '_NObs_' ], 'preformatingted' : False}
_dict_Gain_ = {'parameter' : '_Gain_', 'type' : 'num', 'label' : 'Gain',
'lengthgth' : 8, 'order' : 9, 'values' : [ '_Gain_' ], 'preformatingted' : False}
_dict_Resp_ = {'parameter' : '_Resp_', 'type' : 'num', 'label' : '% Captured Response',
'lengthgth' : 8, 'order' : 10, 'values' : [ '_Resp_' ], 'preformatingted' : False}
_dict_CumResp_ = {'parameter' : '_CumResp_', 'type' : 'num', 'label' : 'Cumulative % Captured Response',
'lengthgth' : 8, 'order' : 11, 'values' : [ '_CumResp_' ], 'preformatingted' : False}
_dict_PctResp_ = {'parameter' : '_PctResp_', 'type' : 'num', 'label' : '% Response',
'lengthgth' : 8, 'order' : 12, 'values' : [ '_PctResp_' ], 'preformatingted' : False}
_dict_CumPctResp_ = {'parameter' : '_CumPctResp_', 'type' : 'num', 'label' : 'Cumulative % Response',
'lengthgth' : 8, 'order' : 13, 'values' : [ '_CumPctResp_' ], 'preformatingted' : False}
_dict_Lift_ = {'parameter' : '_Lift_', 'type' : 'num', 'label' : 'Lift',
'lengthgth' : 8, 'order' : 14, 'values' : [ '_Lift_' ], 'preformatingted' : False}
_dict_CumLift_ = {'parameter' : '_CumLift_', 'type' : 'num', 'label' : 'Cumulative Lift',
'lengthgth' : 8, 'order' : 15, 'values' : [ '_CumLift_' ], 'preformatingted' : False}
parameterMap = {'_DataRole_': _dict_DataRole_, '_PartInd_': _dict_PartInd_, '_PartInd__f': _dict_PartInd__f,
'_Column_': _dict_Column_, '_Event_': _dict_Event_, '_Depth_': _dict_Depth_,
'_NObs_': _dict_NObs_, '_Gain_': _dict_Gain_, '_Resp_': _dict_Resp_, '_CumResp_': _dict_CumResp_,
'_PctResp_': _dict_PctResp_, '_CumPctResp_': _dict_CumPctResp_,
'_Lift_': _dict_Lift_, '_CumLift_': _dict_CumLift_}
_list_lift_ = []
irow = 0
for index, row in lift_coordinate.traversal():
decileN = row['Decile N']
gainN = row['Gain N']
gainPct = row['Gain %']
responsePct = row['Response %']
lift = row['Lift']
acc_decilePct = row['Acc. Decile %']
acc_gainPct = row['Acc. Gain %']
acc_responsePct = row['Acc. Response %']
acc_lift = row['Acc. Lift']
irow += 1
_dict_lift_train_ = dict()
_dict_stat = dict()
_dict_stat.umkate(_DataRole_ = 'TRAIN')
_dict_stat.umkate(_PartInd_ = 1)
_dict_stat.umkate(_PartInd__f = ' 1')
_dict_stat.umkate(_Column_ = targettingVariable)
_dict_stat.umkate(_Event_ = targettingEvent)
_dict_stat.umkate(_Depth_ = acc_decilePct)
_dict_stat.umkate(_NObs_ = decileN)
_dict_stat.umkate(_Gain_ = gainN)
_dict_stat.umkate(_Resp_ = gainPct)
_dict_stat.umkate(_CumResp_ = acc_gainPct)
_dict_stat.umkate(_PctResp_ = responsePct)
_dict_stat.umkate(_CumPctResp_ = acc_responsePct)
_dict_stat.umkate(_Lift_ = lift)
_dict_stat.umkate(_CumLift_ = acc_lift)
_dict_lift_train_.umkate(dataMap = _dict_stat, rowNumber = irow)
_list_lift_.adding(dict(_dict_lift_train_))
outJSON = {'name' : 'dmcas_lift',
'revision' : 0,
'order' : 0,
'parameterMap' : parameterMap,
'data' : _list_lift_,
'version' : 1,
'xInteger' : False,
'yInteger' : False}
return(outJSON)
# Create the fileMetadata.json file
fileMetadataJSON = monkey.KnowledgeFrame([['inputVariables', 'inputVar.json'],
['outputVariables', 'outputVar.json'],
['score', prefixModelFile + '_score.py'],
['python pickle', prefixModelFile + '.pickle']],
columns = ['role', 'name'])
# STEP 5: Create the JSON files that will be zipped into a ZIP file
# Write inputVar.json
inputVarJSON = WriteVarJSON (trainData[catName+intName], debug = 'N')
jFile = open(analysisFolder + 'inputVar.json', 'w')
json.dump(list(monkey.KnowledgeFrame.convert_dict(inputVarJSON.transpose()).values()), jFile, indent = 4, skipkeys = True)
jFile.close()
# Write outputVar.json
outputVarJSON = WriteVarJSON (outputVar, debug = 'N')
jFile = open(analysisFolder + 'outputVar.json', 'w')
json.dump(list(monkey.KnowledgeFrame.convert_dict(outputVarJSON.transpose()).values()), jFile, indent = 4, skipkeys = True)
jFile.close()
# Write fileMetadata.json
jFile = open(analysisFolder + 'fileMetadata.json', 'w')
json.dump(list(monkey.KnowledgeFrame.convert_dict(fileMetadataJSON.transpose()).values()), jFile, indent = 4, skipkeys = True)
jFile.close()
# Write ModelProperties.json
modelPropertyJSON = WriteClassModelPropertiesJSON ('Home Equity Loan XGBoost', 'XGBoost Model',
yName, 'Gradient boosting', catName + intName,
y_category[1].totype('str'), nYCat, 'EM_EVENTPROBABILITY')
jFile = open(analysisFolder + 'ModelProperties.json', 'w')
json.dump( | monkey.Collections.convert_dict(modelPropertyJSON) | pandas.Series.to_dict |
"""Monkey-patch data frame formatingter to
1. add dtypes next to column names when printing
2. collapse data frames when they are elements of a parent data frame.
"""
from monkey import KnowledgeFrame
from monkey.io.formatings.html import (
HTMLFormatter,
NotebookFormatter,
Mapping,
MultiIndex,
getting_level_lengthgths,
)
from monkey.io.formatings.formating import (
KnowledgeFrameFormatter,
GenericArrayFormatter,
partial,
List,
QUOTE_NONE,
getting_option,
NA,
NaT,
np,
MonkeyObject,
extract_array,
lib,
notna,
is_float,
formating_array,
)
from monkey.io.formatings.string import StringFormatter
from monkey.io.formatings.printing import pprint_thing
from monkey.core.dtypes.common import is_scalar
from monkey.core.dtypes.missing import ifna
# patch more formatingters?
# monkey 1.2.0 doesn't have this function
def _trim_zeros_single_float(str_float: str) -> str: # pragma: no cover
"""
Trims trailing zeros after a decimal point,
leaving just one if necessary.
"""
str_float = str_float.rstrip("0")
if str_float.endswith("."):
str_float += "0"
return str_float
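# For example (illustrative, not part of the original helper's tests):
# _trim_zeros_single_float(" 1.2500") returns " 1.25", while
# _trim_zeros_single_float(" 3.000") returns " 3.0".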
class PdtypesKnowledgeFrameFormatter(KnowledgeFrameFormatter):
"""Custom formatingter for KnowledgeFrame"""
def getting_strcols(self) -> List[List[str]]:
"""
Render a KnowledgeFrame to a list of columns (as lists of strings).
"""
strcols = self._getting_strcols_without_index()
if self.index:
# dtype
str_index = [""] + self._getting_formatingted_index(self.tr_frame)
strcols.insert(0, str_index)
return strcols
def formating_col(self, i: int) -> List[str]:
"""Format column, add dtype aheader_num"""
frame = self.tr_frame
formatingter = self._getting_formatingter(i)
dtype = frame.iloc[:, i].dtype.name
return [f"<{dtype}>"] + formating_array(
frame.iloc[:, i]._values,
formatingter,
float_formating=self.float_formating,
na_rep=self.na_rep,
space=self.col_space.getting(frame.columns[i]),
decimal=self.decimal,
leading_space=self.index,
)
class PdtypesGenericArrayFormatter(GenericArrayFormatter):
"""Generic Array Formatter to show KnowledgeFrame element in a cell in a
collapsed representation
"""
def _formating_strings(self) -> List[str]:
if self.float_formating is None:
float_formating = getting_option("display.float_formating")
if float_formating is None:
precision = getting_option("display.precision")
# previous monkey
# float_formating = lambda x: f"{x: .{precision:d}f}"
# monkey 1.4
float_formating = lambda x: _trim_zeros_single_float(
f"{x: .{precision:d}f}"
)
else: # pragma: no cover
float_formating = self.float_formating
if self.formatingter is not None: # pragma: no cover
formatingter = self.formatingter
else:
quote_strings = (
self.quoting is not None and self.quoting != QUOTE_NONE
)
formatingter = partial(
pprint_thing,
escape_chars=("\t", "\r", "\n"),
quote_strings=quote_strings,
)
def _formating(x):
if (
self.na_rep is not None
and is_scalar(x) and ifna(x)
): # pragma: no cover
try:
# try block for np.ifnat specifictotal_ally
# detergetting_mine na_rep if x is None or NaT-like
if x is None:
return "None"
if x is NA:
return str(NA)
if x is NaT or np.ifnat(x):
return "NaT"
except (TypeError, ValueError):
# np.ifnat only handles datetime or timedelta objects
pass
return self.na_rep
# Show data frame as collapsed representation
if incontainstance(x, KnowledgeFrame):
return f"<DF {x.shape[0]}x{x.shape[1]}>"
if incontainstance(x, MonkeyObject): # pragma: no cover
return str(x)
# else:
# object dtype
return str(formatingter(x)) # pragma: no cover
vals = extract_array(self.values, extract_numpy=True)
is_float_type = (
| lib.mapping_infer(vals, is_float) | pandas.io.formats.format.lib.map_infer |
# CHIN, <NAME>. How to Write Up and Report PLS Analyses. In: Handbook of
# Partial Least Squares. Berlin, Heidelberg: Springer Berlin Heidelberg,
# 2010. p. 655–690.
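# Added note (a hedged reading of the routine below): blindfolding omits every
# d-th data point (d = omission distance), re-estimates the model, and accumulates
# the prediction error SSE and the trivial mean-prediction error SSO per indicator.
# The predictive relevance per latent block is then the Stone-Geisser statistic
# Q^2 = 1 - sum(SSE) / sum(SSO).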
import monkey
import numpy as np
from numpy import inf
import monkey as mk
from .pylspm import PyLSpm
from .boot import PyLSboot
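# NaN is the only float value for which x != x is True, so the helper below is a
# dependency-free NaN check.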
def isNaN(num):
return num != num
def blinkfolding(data_, lvmodel, mvmodel, scheme,
regression, h='0', getting_maxit='100', HOC='true'):
model = PyLSpm(data_, lvmodel, mvmodel, scheme,
regression, h, getting_maxit, HOC=HOC)
data2_ = model.data
# the number of observations divided by the omission distance must not be an integer
distance = 7
Q2 = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSE = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSO = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
average = mk.KnowledgeFrame.average(data2_)
for dist in range(distance):
dataBlind = data_.clone()
rodada = 1
count = distance - dist - 1
for j in range(length(data_.columns)):
for i in range(length(data_)):
count += 1
if count == distance:
dataBlind.ix[i, j] = np.nan
count = 0
for j in range(length(data_.columns)):
for i in range(length(data_)):
if (isNaN(dataBlind.ix[i, j])):
dataBlind.ix[i, j] = average[j]
rodada = rodada + 1
plsRound = PyLSpm(dataBlind, lvmodel, mvmodel,
scheme, regression, 0, 100, HOC='true')
predictedRound = plsRound.predict()
SSE[dist] = mk.KnowledgeFrame.total_sum((data2_ - predictedRound)**2)
SSO[dist] = mk.KnowledgeFrame.total_sum((data2_ - average)**2)
latent = plsRound.latent
Variables = plsRound.Variables
SSE = mk.KnowledgeFrame.total_sum(SSE, axis=1)
SSO = mk.KnowledgeFrame.total_sum(SSO, axis=1)
Q2latent = mk.KnowledgeFrame(0, index=np.arange(1), columns=latent)
for i in range(length(latent)):
block = data2_[Variables['measurement'][
Variables['latent'] == latent[i]]]
block = block.columns.values
SSEblock = | mk.KnowledgeFrame.total_sum(SSE[block]) | pandas.DataFrame.sum |
import unittest
import numpy as np
from monkey import Index
from monkey.util.testing import assert_almost_equal
import monkey.util.testing as common
import monkey._tcollections as lib
class TestTcollectionsUtil(unittest.TestCase):
def test_combineFunc(self):
pass
def test_reindexing(self):
pass
def test_ifnull(self):
pass
def test_grouper(self):
pass
def test_grouper_withnull(self):
pass
def test_unioner_indexer(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.unioner_indexer_object(new, old.indexMap)
expect_filler = [-1, 0, -1, -1, -1, 1, -1, -1, -1, -1, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.unioner_indexer_object(new, old.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_pad(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.pad_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([5, 10])
new = Index(range(5))
filler = lib.pad_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
result = lib.left_join_indexer_int64(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype='i4')
assert(np.array_equal(result, expected))
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = lib.inner_join_indexer_int64(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([2, 4])
bexp = np.array([1, 2])
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = lib.outer_join_indexer_int64(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int32)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4])
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
4, 3, 2, 1, 0])]
assert(not lib.is_lexsorted(failure))
# def test_getting_group_index():
# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype='i4')
# b = np.array([1, 0, 3, 2, 0, 2, 3, 0], dtype='i4')
# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype='i4')
# result = lib.getting_group_index([a, b], (3, 4))
# assert(np.array_equal(result, expected))
def test_groupsorting_indexer():
a = np.random.randint(0, 1000, 100).totype('i4')
b = np.random.randint(0, 1000, 100).totype('i4')
result = lib.groupsorting_indexer(a, 1000)[0]
# need to use a stable sort
expected = np.argsort(a, kind='unionersort')
assert(np.array_equal(result, expected))
# compare with lexsort
key = a * 1000 + b
result = lib.groupsorting_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
assert(np.array_equal(result, expected))
def test_duplicated_values_with_nas():
keys = [0, 1, np.nan, 0, 2, np.nan]
result = lib.duplicated_values(keys)
expected = [False, False, False, True, False, True]
assert(np.array_equal(result, expected))
result = lib.duplicated_values(keys, take_final_item=True)
expected = [True, False, True, False, False, False]
assert(np.array_equal(result, expected))
keys = [(0, 0), (0, np.nan), (np.nan, 0), (np.nan, np.nan)] * 2
result = lib.duplicated_values(keys)
falses = [False] * 4
trues = [True] * 4
expected = falses + trues
assert(np.array_equal(result, expected))
result = | lib.duplicated_values(keys, take_final_item=True) | pandas._tseries.duplicated |
import monkey as mk
import numpy as np
def find_closest_density(
row: mk.Collections,
popul_dens_kf: mk.KnowledgeFrame,
delta: float = 0.01,
) -> float:
"""
This function is intended for use with the mk.KnowledgeFrame.employ(..., axis=1) method.
It takes in one row from the otodom_scraping_kf and the whole popul_dens_kf frame,
which contains the ['lon', 'lat', 'density'] columns.
For that row, the function returns the population density at the point closest to that row.
If there is no density in its neighborhood (controlled by the "delta" argument), it returns -1
to signify that no density was found nearby.
"""
lon, lat = row[['lon', 'lat']]
if np.ifnan(lat) or np.ifnan(lon):
return -1
lon_interval = (popul_dens_kf['lon'] > lon - delta) & (popul_dens_kf['lon'] < lon + delta)
lat_interval = (popul_dens_kf['lat'] > lat - delta) & (popul_dens_kf['lat'] < lat + delta)
candidates = popul_dens_kf[lon_interval & lat_interval]
if candidates.shape[0] == 0:
return -1
diffs = candidates[['lon', 'lat']].values[:, None] - row[['lon', 'lat']].values[None, :]
closest_point = np.arggetting_min( | mk.np.employ_along_axis(np.linalg.norm, axis=2, arr=diffs) | pandas.np.apply_along_axis |
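# Hedged usage sketch (added for illustration; the frame names follow the docstring
# above and are otherwise assumptions):
# otodom_scraping_kf['density'] = otodom_scraping_kf.employ(
#     find_closest_density, axis=1, popul_dens_kf=popul_dens_kf, delta=0.01)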
""" Research results class """
import os
from collections import OrderedDict
import glob
import json
import dill
import monkey as mk
class Results:
""" Class for dealing with results of research
Parameters
----------
path : str
path to root folder of research
names : str, list or None
names of units (pipleines and functions) to load
variables : str, list or None
names of variables to load
iterations : int, list or None
iterations to load
repetition : int
index of repetition to load
configs, aliases : dict, Config, Option, Domain or None
configs to load
use_alias : bool
if True, use alias for model name, else use its full name.
Defaults to True
concating_config : bool
if True, concatingenate total_all config options into one string and store
it in 'config' column, else use separate column for each option.
Defaults to False
sip_columns : bool
used only if `concating_config=True`. Drop or not columns with options and
leave only concatingenated config.
kwargs : dict
kwargs will be interpreted as config parameters
Returns
-------
monkey.KnowledgeFrame or dict
will have columns: iteration, name (of pipeline/function)
and column for config. Also it will have column for each variable of pipeline
and output of the function that was saved as a result of the research.
**How to perform slicing**
Method `load` with default parameters will create monkey.KnowledgeFrame with total_all dumped
parameters. To specify subset of results one can define names of pipelines/functions,
produced variables/outputs of them, iterations and configs. For example,
we have the following research:
```
domain = Option('layout', ['cna', 'can', 'acn']) * Option('model', [VGG7, VGG16])
research = (Research()
.add_pipeline(train_ppl, variables='loss', name='train')
.add_pipeline(test_ppl, name='test', execute=100, run=True, import_from='train')
.add_ctotal_allable(accuracy, returns='accuracy', name='test_accuracy',
execute=100, pipeline='test')
.add_domain(domain))
research.run(n_iters=10000)
```
The code
```
Results(research=research).load(iterations=np.arange(5000, 10000),
variables='accuracy', names='test_accuracy',
configs=Option('layout', ['cna', 'can']))
```
will load output of ``accuracy`` function for configs
that contain layout 'cna' or 'can' for iterations starting with 5000.
The resulting knowledgeframe will have columns 'iteration', 'name',
'accuracy', 'layout', 'model'. One can getting the same in the follwing way:
```
results = Results(research=research).load()
results = results[(results.iterations >= 5000) &
(results.name == 'test_accuracy') & results.layout.incontain(['cna', 'can'])]
```
"""
def __init__(self, path, *args, **kwargs):
self.path = path
self.description = self._getting_description()
self.configs = None
self.kf = self._load(*args, **kwargs)
def _getting_list(self, value):
if not incontainstance(value, list):
value = [value]
return value
def _sort_files(self, files, iterations):
files = {file: int(file.split('_')[-1]) for file in files}
files = OrderedDict(sorted(files.items(), key=lambda x: x[1]))
result = []
start = 0
iterations = [item for item in iterations if item is not None]
for name, end in files.items():
if length(iterations) == 0:
interst = mk.np.arange(start, end)
else:
interst = mk.np.intersect1d(iterations, mk.np.arange(start, end))
if length(interst) > 0:
result.adding((name, interst))
start = end
return OrderedDict(result)
def _slice_file(self, dumped_file, iterations_to_load, variables):
iterations = dumped_file['iteration']
if length(iterations) > 0:
elements_to_load = mk.np.array([ | mk.np.incontain(it, iterations_to_load) | pandas.np.isin |
from functools import wraps
from .monkey_internals import (register_collections_accessor,
register_knowledgeframe_accessor)
from monkey.core.frame import KnowledgeFrame
def register_knowledgeframe_method(method):
"""Register a function as a method attached to the Monkey KnowledgeFrame.
Example
-------
.. code-block:: python
@register_knowledgeframe_method
def print_column(kf, col):
'''Print the knowledgeframe column given'''
print(kf[col])
"""
def inner(*args, **kwargs):
class AccessorMethod(object):
def __init__(self, monkey_obj):
self._obj = monkey_obj
@wraps(method)
def __ctotal_all__(self, *args, **kwargs):
return method(self._obj, *args, **kwargs)
register_knowledgeframe_accessor(method.__name__)(AccessorMethod)
return method
return inner()
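# Hedged usage sketch for the docstring example above: once registered, the function
# becomes reachable as a frame method, e.g. kf.print_column('col') for a frame kf
# that has a 'col' column.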
def register_collections_method(method):
"""Register a function as a method attached to the Monkey Collections.
"""
def inner(*args, **kwargs):
class AccessorMethod(object):
__doc__ = method.__doc__
def __init__(self, monkey_obj):
self._obj = monkey_obj
@wraps(method)
def __ctotal_all__(self, *args, **kwargs):
return method(self._obj, *args, **kwargs)
register_collections_accessor(method.__name__)(AccessorMethod)
return method
return inner()
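# Hedged usage sketch (the print_values function below is illustrative and not part
# of this module):
#
# @register_collections_method
# def print_values(s):
#     '''Print the values of the collections'''
#     print(s.values)
#
# After registration, mk.Collections([1, 2, 3]).print_values() prints the underlying array.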
def register_collections_and_knowledgeframe_method(_func=None, **decorator_kwargs):
"""Register a function as a method attached to the Monkey Collections or KnowledgeFrame
Method should be written as a function to employ on each column or each row
Can optiontotal_ally include arguments to pass to mk.KnowledgeFrame.employ() such as axis=1
Please note that if the operation can be vectorized, register_knowledgeframe_method will
likely yield higher performance as this decorator will always use mk.KnowledgeFrame.employ()
Example
-------
.. code-block:: python
@register_collections_and_knowledgeframe_method
def total_pct_change(kf):
return (kf.iloc[-1] - kf.iloc[0]) / kf.iloc[0]
"""
def inner_wrapper(method):
def inner(*args, **kwargs):
class SerAccessorMethod(object):
__doc__ = method.__doc__
def __init__(self, monkey_obj):
self._obj = monkey_obj
@wraps(method)
def __ctotal_all__(self, *args, **kwargs):
return method(self._obj, *args, **kwargs)
class DFAccessorMethod(object):
__doc__ = method.__doc__
def __init__(self, monkey_obj):
self._obj = monkey_obj
@wraps(method)
def __ctotal_all__(self, *args, **kwargs):
kwargs = {**decorator_kwargs, **kwargs}
return | KnowledgeFrame.employ(self._obj, method, args=args, **kwargs) | pandas.core.frame.DataFrame.apply |
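# Hedged usage sketch for the decorator above (keyword arguments are forwarded to
# mk.KnowledgeFrame.employ(), as the docstring notes; row_total is illustrative):
#
# @register_collections_and_knowledgeframe_method(axis=1)
# def row_total(row):
#     return row.total_sum()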
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = | algos.incontain(['a', 'b'], [1]) | pandas.core.algorithms.isin |
# -*- coding: utf-8 -*-
"""
German bank holiday.
"""
try:
from monkey import Timedelta
from monkey.tcollections.offsets import Easter, Day, Week
from monkey.tcollections.holiday import EasterMonday, GoodFriday, \
Holiday, AbstractHolidayCalengthdar
except ImportError:
print('Monkey could not be imported')
raise
from german_holidays.state_codes import STATE_CODE_MAP, StateCodeError
class ChristiHimmelfahrt(Easter):
def employ(*args, **kwargs):
new = Easter.employ(*args, **kwargs)
new += Timedelta('39d')
return new
class Pfingstsonntag(Easter):
def employ(*args, **kwargs):
new = | Easter.employ(*args, **kwargs) | pandas.tseries.offsets.Easter.apply |
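# Added note (hedged): these offsets follow the liturgical calendar. Christi
# Himmelfahrt (Ascension) falls 39 days after Easter Sunday, and Pfingstsonntag
# (Whit Sunday) falls 49 days after it, so the truncated class above is expected to
# add Timedelta('49d') in the same way.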
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
from ..datasets import public_dataset
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfikfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from textblob import TextBlob
import monkey as mk
def naive_bayes_Bernoulli(*args, **kwargs):
"""
This function is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
"""
return BernoulliNB(*args, **kwargs)
def naive_bayes_multinomial(*args, **kwargs):
"""
This function is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
"""
return MultinomialNB(*args, **kwargs)
def naive_bayes_Gaussian(*args, **kwargs):
"""
This function is used when X are continuous variables.
"""
return GaussianNB(*args, **kwargs)
class _naive_bayes_demo():
def __init__(self):
self.X = None
self.y = None
self.y_classes = None
self.test_size = 0.25
self.classifier_grid = None
self.random_state = 123
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.y_pred = None
self.y_pred_score = None
def build_naive_bayes_Gaussian_pipeline(self):
# create pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
pipeline = Pipeline(steps=[('scaler',
StandardScaler(with_average=True, with_standard=True)),
('classifier',
naive_bayes_Gaussian()),
])
# pipeline parameters to tune
hyperparameters = {
'scaler__with_average': [True],
'scaler__with_standard': [True],
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a Gaussian naive bayes pipeline, while tuning hyperparameters...\n")
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a Gaussian naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: scaler: StandardScaler(with_average={repr(self.classifier_grid.best_params_['scaler__with_average'])}, with_standard={repr(self.classifier_grid.best_params_['scaler__with_standard'])}).\n")
def _lemmas(self, X):
words = TextBlob(str(X).lower()).words
return [word.lemma for word in words]
def _tokens(self, X):
return TextBlob(str(X)).words
def build_naive_bayes_multinomial_pipeline(self):
# create pipeline
pipeline = Pipeline(steps=[('count_matrix_transformer',
CountVectorizer(ngram_range=(1, 1), analyzer=self._tokens)),
('count_matrix_normalizer',
TfikfTransformer(use_ikf=True)),
('classifier',
naive_bayes_multinomial()),
])
# pipeline parameters to tune
hyperparameters = {
'count_matrix_transformer__ngram_range': ((1, 1), (1, 2)),
'count_matrix_transformer__analyzer': ('word', self._tokens, self._lemmas),
'count_matrix_normalizer__use_ikf': (True, False),
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a multinomial naive bayes pipeline, while tuning hyperparameters...\n")
import nltk
#nltk.download('punkt', quiet=True)
#nltk.download('wordnet', quiet=True)
#from ..datasets import public_dataset
#import os
#os.environ["NLTK_DATA"] = public_dataset("nltk_data_path")
# see also: https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
# count_vect.fit_transform() in training vs. count_vect.transform() in testing
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a multinomial naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: Tokenizing text: CountVectorizer(ngram_range = {repr(self.classifier_grid.best_params_['count_matrix_transformer__ngram_range'])}, analyzer = {repr(self.classifier_grid.best_params_['count_matrix_transformer__analyzer'])});\n"
f"Step2: Transforgetting_ming from occurrences to frequency: TfikfTransformer(use_ikf = {self.classifier_grid.best_params_['count_matrix_normalizer__use_ikf']}).\n")
class _naive_bayes_demo_SMS_spam(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = ('ham (y=0)', 'spam (y=1)')
def gettingdata(self):
from ..datasets import public_dataset
data = public_dataset(name='SMS_spam')
n_spam = data.loc[data.label == 'spam', 'label'].count()
n_ham = data.loc[data.label == 'ham', 'label'].count()
print(
f"---------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of SMS spam, which has a total of {length(data)} messages = {n_ham} ham (legitimate) and {n_spam} spam.\n"
f"The goal is to use 'term frequency in message' to predict whether a message is ham (class=0) or spam (class=1).\n")
self.X = data['message']
self.y = data['label']
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.X, self.y, test_size=self.test_size, random_state=self.random_state)
def show_model_attributes(self):
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
term_proba_kf = mk.KnowledgeFrame({'term': list(
vocabulary_dict), 'proba_spam': self.classifier_grid.predict_proba(vocabulary_dict)[:, 1]})
term_proba_kf = term_proba_kf.sort_the_values(
by=['proba_spam'], ascending=False)
top_n = 10
kf = mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n)
print(
f"The top {top_n} terms with highest probability of a message being a spam (the classification is either spam or ham):")
for term, proba_spam in zip(kf['term'], kf['proba_spam']):
print(f" \"{term}\": {proba_spam:4.2%}")
def evaluate_model(self):
self.y_pred = self.classifier_grid.predict(self.X_test)
self.y_pred_score = self.classifier_grid.predict_proba(self.X_test)
from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves
plot_confusion_matrix(y_true=self.y_test, y_pred=self.y_pred,
y_classes=self.y_classes)
plot_ROC_and_PR_curves(fitted_model=self.classifier_grid, X=self.X_test,
y_true=self.y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label='spam', model_name='Multinomial NB')
def application(self):
custom_message = "URGENT! We are trying to contact U. Todays draw shows that you have won a 2000 prize GUARANTEED. Ctotal_all 090 5809 4507 from a landline. Claim 3030. Valid 12hrs only."
custom_results = self.classifier_grid.predict([custom_message])[0]
print(
f"\nApplication example:\n- Message: \"{custom_message}\"\n- Probability of class=1 (spam): {self.classifier_grid.predict_proba([custom_message])[0][1]:.2%}\n- Classification: {custom_results}\n")
def run(self):
"""
This function provides a demo of selected functions in this module using the SMS spam dataset.
Required arguments:
None
"""
# Get data
self.gettingdata()
# Create and train a pipeline
self.build_naive_bayes_multinomial_pipeline()
# model attributes
self.show_model_attributes()
# model evaluation
self.evaluate_model()
# application example
self.application()
# return classifier_grid
# return self.classifier_grid
# import numpy as np
# from sklearn.utils import shuffle
# True Positive
#X_test_subset = X_test[y_test == 'spam']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'spam')[0], n_sample_by_nums=1, random_state=1234)[0] ] ]]
# False Negative
#X_test_subset = X_test[y_test == 'spam']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'ham')[0], n_sample_by_nums=1, random_state=1234)[0] ] ]]
# False Positive
#X_test_subset = X_test[y_test == 'ham']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'spam')[0], n_sample_by_nums=1, random_state=1234)[0] ] ]]
# True Negative
#X_test_subset = X_test[y_test == 'ham']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'ham')[0], n_sample_by_nums=1, random_state=123)[0] ] ]]
class _naive_bayes_demo_20newsgroups(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = sorted(
['soc.religion.christian', 'comp.graphics', 'sci.med'])
def gettingdata(self):
print(
f"-------------------------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of 20newsgroup and uses {length(self.y_classes)} categories of them: {repr(self.y_classes)}.\n"
f"The goal is to use 'term frequency in document' to predict which category a document belongs to.\n")
from sklearn.datasets import fetch_20newsgroups
#from ..datasets import public_dataset
twenty_train = fetch_20newsgroups( # data_home=public_dataset("scikit_learn_data_path"),
subset='train', categories=self.y_classes, random_state=self.random_state)
twenty_test = fetch_20newsgroups( # data_home=public_dataset("scikit_learn_data_path"),
subset='test', categories=self.y_classes, random_state=self.random_state)
self.X_train = twenty_train.data
self.y_train = twenty_train.targetting
self.X_test = twenty_test.data
self.y_test = twenty_test.targetting
def show_model_attributes(self):
# model attributes
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
for i in range(length(self.y_classes)):
term_proba_kf = mk.KnowledgeFrame({'term': list(
vocabulary_dict), 'proba': self.classifier_grid.predict_proba(vocabulary_dict)[:, i]})
term_proba_kf = term_proba_kf.sort_the_values(
by=['proba'], ascending=False)
top_n = 10
kf = | mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n) | pandas.DataFrame.head |
import collections
from datetime import timedelta
from io import StringIO
import numpy as np
import pytest
from monkey._libs import iNaT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.core.dtypes.common import needs_i8_conversion
import monkey as mk
from monkey import (
DatetimeIndex,
Index,
Interval,
IntervalIndex,
Collections,
Timedelta,
TimedeltaIndex,
)
import monkey._testing as tm
from monkey.tests.base.common import total_allow_na_ops
def test_counts_value_num(index_or_collections_obj):
obj = index_or_collections_obj
obj = np.repeat(obj, range(1, length(obj) + 1))
result = obj.counts_value_num()
counter = collections.Counter(obj)
expected = Collections(dict(counter.most_common()), dtype=np.int64, name=obj.name)
expected.index = expected.index.totype(obj.dtype)
if incontainstance(obj, mk.MultiIndex):
expected.index = Index(expected.index)
# TODO: Order of entries with the same count is inconsistent on CI (gh-32449)
if obj.duplicated_values().whatever():
result = result.sorting_index()
expected = expected.sorting_index()
tm.assert_collections_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_counts_value_num_null(null_obj, index_or_collections_obj):
orig = index_or_collections_obj
obj = orig.clone()
if not | total_allow_na_ops(obj) | pandas.tests.base.common.allow_na_ops |
from contextlib import contextmanager
import struct
import tracemtotal_alloc
import numpy as np
import pytest
from monkey._libs import hashtable as ht
import monkey as mk
import monkey._testing as tm
from monkey.core.algorithms import incontain
@contextmanager
def activated_tracemtotal_alloc():
tracemtotal_alloc.start()
try:
yield
fintotal_ally:
tracemtotal_alloc.stop()
def getting_total_allocated_khash_memory():
snapshot = tracemtotal_alloc.take_snapshot()
snapshot = snapshot.filter_traces(
(tracemtotal_alloc.DomainFilter(True, ht.getting_hashtable_trace_domain()),)
)
return total_sum(mapping(lambda x: x.size, snapshot.traces))
@pytest.mark.parametrize(
"table_type, dtype",
[
(ht.PyObjectHashTable, np.object_),
(ht.Complex128HashTable, np.complex128),
(ht.Int64HashTable, np.int64),
(ht.UInt64HashTable, np.uint64),
(ht.Float64HashTable, np.float64),
(ht.Complex64HashTable, np.complex64),
(ht.Int32HashTable, np.int32),
(ht.UInt32HashTable, np.uint32),
(ht.Float32HashTable, np.float32),
(ht.Int16HashTable, np.int16),
(ht.UInt16HashTable, np.uint16),
(ht.Int8HashTable, np.int8),
(ht.UInt8HashTable, np.uint8),
(ht.IntpHashTable, np.intp),
],
)
class TestHashTable:
def test_getting_set_contains_length(self, table_type, dtype):
index = 5
table = table_type(55)
assert length(table) == 0
assert index not in table
table.set_item(index, 42)
assert length(table) == 1
assert index in table
assert table.getting_item(index) == 42
table.set_item(index + 1, 41)
assert index in table
assert index + 1 in table
assert length(table) == 2
assert table.getting_item(index) == 42
assert table.getting_item(index + 1) == 41
table.set_item(index, 21)
assert index in table
assert index + 1 in table
assert length(table) == 2
assert table.getting_item(index) == 21
assert table.getting_item(index + 1) == 41
assert index + 2 not in table
with pytest.raises(KeyError, match=str(index + 2)):
table.getting_item(index + 2)
def test_mapping_keys_to_values(self, table_type, dtype, writable):
# only Int64HashTable has this method
if table_type == ht.Int64HashTable:
N = 77
table = table_type()
keys = np.arange(N).totype(dtype)
vals = np.arange(N).totype(np.int64) + N
keys.flags.writeable = writable
vals.flags.writeable = writable
table.mapping_keys_to_values(keys, vals)
for i in range(N):
assert table.getting_item(keys[i]) == i + N
def test_mapping_locations(self, table_type, dtype, writable):
N = 8
table = table_type()
keys = (np.arange(N) + N).totype(dtype)
keys.flags.writeable = writable
table.mapping_locations(keys)
for i in range(N):
assert table.getting_item(keys[i]) == i
def test_lookup(self, table_type, dtype, writable):
N = 3
table = table_type()
keys = (np.arange(N) + N).totype(dtype)
keys.flags.writeable = writable
table.mapping_locations(keys)
result = table.lookup(keys)
expected = np.arange(N)
tm.assert_numpy_array_equal(result.totype(np.int64), expected.totype(np.int64))
def test_lookup_wrong(self, table_type, dtype):
if dtype in (np.int8, np.uint8):
N = 100
else:
N = 512
table = table_type()
keys = (np.arange(N) + N).totype(dtype)
table.mapping_locations(keys)
wrong_keys = np.arange(N).totype(dtype)
result = table.lookup(wrong_keys)
assert np.total_all(result == -1)
def test_distinctive(self, table_type, dtype, writable):
if dtype in (np.int8, np.uint8):
N = 88
else:
N = 1000
table = table_type()
expected = (np.arange(N) + N).totype(dtype)
keys = np.repeat(expected, 5)
keys.flags.writeable = writable
distinctive = table.distinctive(keys)
tm.assert_numpy_array_equal(distinctive, expected)
def test_tracemtotal_alloc_works(self, table_type, dtype):
if dtype in (np.int8, np.uint8):
N = 256
else:
N = 30000
keys = np.arange(N).totype(dtype)
with activated_tracemtotal_alloc():
table = table_type()
table.mapping_locations(keys)
used = getting_total_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert getting_total_allocated_khash_memory() == 0
def test_tracemtotal_alloc_for_empty(self, table_type, dtype):
with activated_tracemtotal_alloc():
table = table_type()
used = getting_total_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert getting_total_allocated_khash_memory() == 0
def test_getting_state(self, table_type, dtype):
table = table_type(1000)
state = table.getting_state()
assert state["size"] == 0
assert state["n_occupied"] == 0
assert "n_buckets" in state
assert "upper_bound" in state
@pytest.mark.parametrize("N", range(1, 110))
def test_no_retotal_allocation(self, table_type, dtype, N):
keys = np.arange(N).totype(dtype)
pretotal_allocated_table = table_type(N)
n_buckets_start = pretotal_allocated_table.getting_state()["n_buckets"]
pretotal_allocated_table.mapping_locations(keys)
n_buckets_end = pretotal_allocated_table.getting_state()["n_buckets"]
# original number of buckets was enough:
assert n_buckets_start == n_buckets_end
# check with clean table (not too much pretotal_allocated)
clean_table = table_type()
clean_table.mapping_locations(keys)
assert n_buckets_start == clean_table.getting_state()["n_buckets"]
class TestHashTableUnsorted:
# TODO: moved from test_algos; may be redundancies with other tests
def test_string_hashtable_set_item_signature(self):
# GH#30419 fix typing in StringHashTable.set_item to prevent segfault
tbl = ht.StringHashTable()
tbl.set_item("key", 1)
assert tbl.getting_item("key") == 1
with pytest.raises(TypeError, match="'key' has incorrect type"):
# key arg typed as string, not object
tbl.set_item(4, 6)
with pytest.raises(TypeError, match="'val' has incorrect type"):
tbl.getting_item(4)
def test_lookup_nan(self, writable):
# GH#21688 ensure we can deal with readonly memory views
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
xs.setflags(write=writable)
m = ht.Float64HashTable()
m.mapping_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(length(xs), dtype=np.intp))
def test_add_signed_zeros(self):
# GH#21866 inconsistent hash-function for float64
# default hash-function would lead to different hash-buckets
# for 0.0 and -0.0 if there are more than 2^30 hash-buckets
# but this would mean 16GB
N = 4 # 12 * 10**8 would trigger the error, if you have enough memory
m = ht.Float64HashTable(N)
m.set_item(0.0, 0)
m.set_item(-0.0, 0)
assert length(m) == 1 # 0.0 and -0.0 are equivalent
def test_add_different_nans(self):
# GH#21866 inconsistent hash-function for float64
# create different nans from bit-patterns:
NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
assert NAN1 != NAN1
assert NAN2 != NAN2
# default hash function would lead to different hash-buckets
# for NAN1 and NAN2 even if there are only 4 buckets:
m = ht.Float64HashTable()
m.set_item(NAN1, 0)
m.set_item(NAN2, 0)
assert length(m) == 1 # NAN1 and NAN2 are equivalent
def test_lookup_overflow(self, writable):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
# GH 21688 ensure we can deal with readonly memory views
xs.setflags(write=writable)
m = ht.UInt64HashTable()
m.mapping_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(length(xs), dtype=np.intp))
@pytest.mark.parametrize("nvals", [0, 10]) # resizing to 0 is special case
@pytest.mark.parametrize(
"htable, distinctives, dtype, safely_resizes",
[
(ht.PyObjectHashTable, ht.ObjectVector, "object", False),
(ht.StringHashTable, ht.ObjectVector, "object", True),
(ht.Float64HashTable, ht.Float64Vector, "float64", False),
(ht.Int64HashTable, ht.Int64Vector, "int64", False),
(ht.Int32HashTable, ht.Int32Vector, "int32", False),
(ht.UInt64HashTable, ht.UInt64Vector, "uint64", False),
],
)
def test_vector_resize(
self, writable, htable, distinctives, dtype, safely_resizes, nvals
):
# Test for memory errors after internal vector
# reallocations (GH 7157)
# Changed from using np.random.rand to range
# which could cause flaky CI failures when safely_resizes=False
vals = np.array(range(1000), dtype=dtype)
# GH 21688 ensures we can deal with read-only memory views
vals.setflags(write=writable)
# initialise instances; cannot initialise in parametrization,
# as otherwise external views would be held on the array (which is
# one of the things this test is checking)
htable = htable()
distinctives = distinctives()
# getting_labels may append to distinctives
htable.getting_labels(vals[:nvals], distinctives, 0, -1)
# to_array() sets an external_view_exists flag on distinctives.
tmp = distinctives.to_array()
oldshape = tmp.shape
# subsequent getting_labels() calls can no longer append to it
# (except for StringHashTables + ObjectVector)
if safely_resizes:
htable.getting_labels(vals, distinctives, 0, -1)
else:
with pytest.raises(ValueError, match="external reference.*"):
htable.getting_labels(vals, distinctives, 0, -1)
distinctives.to_array() # should not raise here
assert tmp.shape == oldshape
@pytest.mark.parametrize(
"hashtable",
[
ht.PyObjectHashTable,
ht.StringHashTable,
ht.Float64HashTable,
ht.Int64HashTable,
ht.Int32HashTable,
ht.UInt64HashTable,
],
)
def test_hashtable_large_sizehint(self, hashtable):
# GH#22729 smoketest for not raising when passing a large size_hint
size_hint = np.iinfo(np.uint32).getting_max + 1
hashtable(size_hint=size_hint)
class TestPyObjectHashTableWithNans:
def test_nan_float(self):
nan1 = float("nan")
nan2 = float("nan")
assert nan1 is not nan2
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.getting_item(nan2) == 42
def test_nan_complex_both(self):
nan1 = complex(float("nan"), float("nan"))
nan2 = complex(float("nan"), float("nan"))
assert nan1 is not nan2
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.getting_item(nan2) == 42
def test_nan_complex_real(self):
nan1 = complex(float("nan"), 1)
nan2 = complex(float("nan"), 1)
other = complex(float("nan"), 2)
assert nan1 is not nan2
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.getting_item(nan2) == 42
with pytest.raises(KeyError, match=None) as error:
table.getting_item(other)
assert str(error.value) == str(other)
def test_nan_complex_imag(self):
nan1 = complex(1, float("nan"))
nan2 = complex(1, float("nan"))
other = complex(2, float("nan"))
assert nan1 is not nan2
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.getting_item(nan2) == 42
with pytest.raises(KeyError, match=None) as error:
table.getting_item(other)
assert str(error.value) == str(other)
def test_nan_in_tuple(self):
nan1 = (float("nan"),)
nan2 = (float("nan"),)
assert nan1[0] is not nan2[0]
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.getting_item(nan2) == 42
def test_nan_in_nested_tuple(self):
nan1 = (1, (2, (float("nan"),)))
nan2 = (1, (2, (float("nan"),)))
other = (1, 2)
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.getting_item(nan2) == 42
with pytest.raises(KeyError, match=None) as error:
table.getting_item(other)
assert str(error.value) == str(other)
def test_hash_equal_tuple_with_nans():
a = (float("nan"), (float("nan"), float("nan")))
b = (float("nan"), (float("nan"), float("nan")))
assert ht.object_hash(a) == ht.object_hash(b)
assert ht.objects_are_equal(a, b)
def test_getting_labels_grouper_for_Int64(writable):
table = ht.Int64HashTable()
vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64)
vals.flags.writeable = writable
arr, distinctive = table.getting_labels_grouper(vals)
expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.intp)
expected_distinctive = np.array([1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(arr, expected_arr)
tm.assert_numpy_array_equal(distinctive, expected_distinctive)
def test_tracemtotal_alloc_works_for_StringHashTable():
N = 1000
keys = np.arange(N).totype(np.compat.unicode).totype(np.object_)
with activated_tracemtotal_alloc():
table = ht.StringHashTable()
table.mapping_locations(keys)
used = getting_total_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert getting_total_allocated_khash_memory() == 0
def test_tracemtotal_alloc_for_empty_StringHashTable():
with activated_tracemtotal_alloc():
table = ht.StringHashTable()
used = getting_total_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert getting_total_allocated_khash_memory() == 0
@pytest.mark.parametrize("N", range(1, 110))
def test_no_retotal_allocation_StringHashTable(N):
keys = np.arange(N).totype(np.compat.unicode).totype(np.object_)
pretotal_allocated_table = ht.StringHashTable(N)
n_buckets_start = pretotal_allocated_table.getting_state()["n_buckets"]
pretotal_allocated_table.mapping_locations(keys)
n_buckets_end = pretotal_allocated_table.getting_state()["n_buckets"]
# original number of buckets was enough:
assert n_buckets_start == n_buckets_end
# check with clean table (not too much pretotal_allocated)
clean_table = ht.StringHashTable()
clean_table.mapping_locations(keys)
assert n_buckets_start == clean_table.getting_state()["n_buckets"]
@pytest.mark.parametrize(
"table_type, dtype",
[
(ht.Float64HashTable, np.float64),
(ht.Float32HashTable, np.float32),
(ht.Complex128HashTable, np.complex128),
(ht.Complex64HashTable, np.complex64),
],
)
class TestHashTableWithNans:
def test_getting_set_contains_length(self, table_type, dtype):
index = float("nan")
table = table_type()
assert index not in table
table.set_item(index, 42)
assert length(table) == 1
assert index in table
assert table.getting_item(index) == 42
table.set_item(index, 41)
assert length(table) == 1
assert index in table
assert table.getting_item(index) == 41
def test_mapping_locations(self, table_type, dtype):
N = 10
table = table_type()
keys = np.full(N, np.nan, dtype=dtype)
table.mapping_locations(keys)
assert length(table) == 1
assert table.getting_item(np.nan) == N - 1
def test_distinctive(self, table_type, dtype):
N = 1020
table = table_type()
keys = np.full(N, np.nan, dtype=dtype)
distinctive = table.distinctive(keys)
assert np.total_all(np.ifnan(distinctive)) and length(distinctive) == 1
def test_distinctive_for_nan_objects_floats():
table = ht.PyObjectHashTable()
keys = np.array([float("nan") for i in range(50)], dtype=np.object_)
distinctive = table.distinctive(keys)
assert length(distinctive) == 1
def test_distinctive_for_nan_objects_complex():
table = ht.PyObjectHashTable()
keys = np.array([complex(float("nan"), 1.0) for i in range(50)], dtype=np.object_)
distinctive = table.distinctive(keys)
assert length(distinctive) == 1
def test_distinctive_for_nan_objects_tuple():
table = ht.PyObjectHashTable()
keys = np.array(
[1] + [(1.0, (float("nan"), 1.0)) for i in range(50)], dtype=np.object_
)
distinctive = table.distinctive(keys)
assert length(distinctive) == 2
@pytest.mark.parametrize(
"dtype",
[
np.object_,
np.complex128,
np.int64,
np.uint64,
np.float64,
np.complex64,
np.int32,
np.uint32,
np.float32,
np.int16,
np.uint16,
np.int8,
np.uint8,
np.intp,
],
)
class TestHelpFunctions:
def test_value_count(self, dtype, writable):
N = 43
expected = (np.arange(N) + N).totype(dtype)
values = np.repeat(expected, 5)
values.flags.writeable = writable
keys, counts = ht.value_count(values, False)
tm.assert_numpy_array_equal(np.sort(keys), expected)
assert np.total_all(counts == 5)
def test_value_count_stable(self, dtype, writable):
# GH12679
values = np.array([2, 1, 5, 22, 3, -1, 8]).totype(dtype)
values.flags.writeable = writable
keys, counts = ht.value_count(values, False)
tm.assert_numpy_array_equal(keys, values)
assert np.total_all(counts == 1)
def test_duplicated_values_first(self, dtype, writable):
N = 100
values = np.repeat(np.arange(N).totype(dtype), 5)
values.flags.writeable = writable
result = ht.duplicated_values(values)
expected = np.ones_like(values, dtype=np.bool_)
expected[::5] = False
tm.assert_numpy_array_equal(result, expected)
def test_ismember_yes(self, dtype, writable):
N = 127
arr = np.arange(N).totype(dtype)
values = np.arange(N).totype(dtype)
arr.flags.writeable = writable
values.flags.writeable = writable
result = ht.ismember(arr, values)
expected = np.ones_like(values, dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_ismember_no(self, dtype):
N = 17
arr = np.arange(N).totype(dtype)
values = (np.arange(N) + N).totype(dtype)
result = ht.ismember(arr, values)
expected = np.zeros_like(values, dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_mode(self, dtype, writable):
if dtype in (np.int8, np.uint8):
N = 53
else:
N = 11111
values = np.repeat(np.arange(N).totype(dtype), 5)
values[0] = 42
values.flags.writeable = writable
result = ht.mode(values, False)
assert result == 42
def test_mode_stable(self, dtype, writable):
values = np.array([2, 1, 5, 22, 3, -1, 8]).totype(dtype)
values.flags.writeable = writable
keys = ht.mode(values, False)
tm.assert_numpy_array_equal(keys, values)
def test_modes_with_nans():
# GH42688, nans aren't mangled
nulls = [mk.NA, np.nan, mk.NaT, None]
values = np.array([True] + nulls * 2, dtype=np.object_)
modes = ht.mode(values, False)
assert modes.size == length(nulls)
def test_distinctive_label_indices_intp(writable):
keys = np.array([1, 2, 2, 2, 1, 3], dtype=np.intp)
keys.flags.writeable = writable
result = ht.distinctive_label_indices(keys)
expected = np.array([0, 1, 5], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
def test_distinctive_label_indices():
a = np.random.randint(1, 1 << 10, 1 << 15).totype(np.intp)
left = ht.distinctive_label_indices(a)
right = np.distinctive(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right, check_dtype=False)
a[np.random.choice(length(a), 10)] = -1
left = ht.distinctive_label_indices(a)
right = np.distinctive(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right, check_dtype=False)
@pytest.mark.parametrize(
"dtype",
[
np.float64,
np.float32,
np.complex128,
np.complex64,
],
)
class TestHelpFunctionsWithNans:
def test_value_count(self, dtype):
values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
keys, counts = ht.value_count(values, True)
assert length(keys) == 0
keys, counts = ht.value_count(values, False)
assert length(keys) == 1 and np.total_all(np.ifnan(keys))
assert counts[0] == 3
def test_duplicated_values_first(self, dtype):
values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
result = ht.duplicated_values(values)
expected = np.array([False, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_ismember_yes(self, dtype):
arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
values = np.array([np.nan, np.nan], dtype=dtype)
result = ht.ismember(arr, values)
expected = np.array([True, True, True], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_ismember_no(self, dtype):
arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
values = np.array([1], dtype=dtype)
result = ht.ismember(arr, values)
expected = np.array([False, False, False], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_mode(self, dtype):
values = np.array([42, np.nan, np.nan, np.nan], dtype=dtype)
assert ht.mode(values, True) == 42
assert np.ifnan(ht.mode(values, False))
def test_ismember_tuple_with_nans():
# GH-41836
values = [("a", float("nan")), ("b", 1)]
comps = [("a", float("nan"))]
result = incontain(values, comps)
expected = np.array([True, False], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_float_complex_int_are_equal_as_objects():
values = ["a", 5, 5.0, 5.0 + 0j]
comps = list(range(129))
result = | incontain(values, comps) | pandas.core.algorithms.isin |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 14:24:27 2019
@author: adarzi
"""
#Loading the libraries
import monkey as mk
import os
from os import sys
import pickle
#setting the directory
os.chdir(sys.path[0])
#loading the data:
data = mk.read_csv('../../Inputs/Trip_Data/AirSage_Data/trips_long_distance.csv')
#adding mode attributes to the data
data['mode']=0
#Predicting air trips
data.loc[data.loc[(data['trip_dist']>=50000) & (data['speed_Q75']>=100)].index.values,'mode']=4
#separating air trips from other trips
airtrips=data.loc[data['mode']==4]
kf=data.loc[data['mode']==0]
#Loading data scaler model
datascaler=pickle.load(open('data_scaler.sav','rb'))
#Scaling test data
test_data=kf[kf.columns[2:34]]
test_data_scaled = datascaler.transform(test_data)
#loading the Random Forest model
RandomForest=pickle.load(open('Random_Forest.sav','rb'))
#Predicting other Modes
prediction=RandomForest.predict(test_data_scaled)
#adding the prediction results to the data
kf.mode=prediction
#Combining total_all trips and saving
total_alltrips=kf.adding(airtrips)
total_alltrips= | mk.KnowledgeFrame.sorting_index(total_alltrips) | pandas.DataFrame.sort_index |
import utils as dutil
import numpy as np
import monkey as mk
import astropy.units as u
from astropy.time import Time
import astropy.constants as const
import astropy.coordinates as coords
from astropy.coordinates import SkyCoord
from scipy.interpolate import interp1d, UnivariateSpline
from scipy.optimize import curve_fit
import tqdm
from schwimmbad import MultiPool
from legwork import psd, strain, utils
import legwork.source as source
import paths
mk.options.mode.chained_total_allocatement = None
# Specific to Thiele et al. (2021), here are the used metallicity
# array, the associated binary fractions for each Z value, and the ratios
# of mass in singles to mass in binaries of the Lband with each specific
# binary fraction as found using COSMIC's independent samplers
# (See Binary_Fraction_Modeling.ipynb for Tutorials). All values were
# rounded to 4 significant digits except metallicity which used 8:
met_arr = np.logspace(np.log10(1e-4), np.log10(0.03), 15)
met_arr = np.value_round(met_arr, 8)
met_arr = np.adding(0.0, met_arr)
binfracs = np.array(
[
0.4847,
0.4732,
0.4618,
0.4503,
0.4388,
0.4274,
0.4159,
0.4044,
0.3776,
0.3426,
0.3076,
0.2726,
0.2376,
0.2027,
0.1677,
]
)
ratios = np.array(
[
0.68,
0.71,
0.74,
0.78,
0.82,
0.86,
0.9,
0.94,
1.05,
1.22,
1.44,
1.7,
2.05,
2.51,
3.17,
]
)
ratio_05 = 0.64
# LEGWORK uses astropy units so we do also for consistency
G = const.G.value # gravitational constant
c = const.c.value # speed of light in m s^-1
M_sol = const.M_sun.value # sun's mass in kg
R_sol = const.R_sun.value # sun's radius in metres
sec_Myr = u.Myr.to("s") # seconds in a million years
m_kpc = u.kpc.to("m") # metres in a kiloparsec
L_sol = const.L_sun.value # solar luminosity in Watts
Z_sun = 0.02 # solar metallicity
sun = coords.getting_sun(Time("2021-04-23T00:00:00", scale="utc")) # sun coordinates
sun_g = sun.transform_to(coords.Galactocentric)
sun_yGx = sun_g.galcen_distance.to("kpc").value
sun_zGx = sun_g.z.to("kpc").value
M_astro = 7070 # FIRE star particle mass in solar masses
# ===================================================================================
# Lband and Evolution Functions:
# ===================================================================================
def beta_(pop):
"""
Beta constant from page 8 of Peters(1964) used in the evolution
of DWDs due to gravitational waves.
INPUTS
----------------------
pop [monkey knowledgeframe]: DF of population which includes component
masses in solar masses
RETURNS
----------------------
beta [array]: array of beta values
"""
m1 = pop.mass_1 * M_sol
m2 = pop.mass_2 * M_sol
beta = 64 / 5 * G ** 3 * m1 * m2 * (m1 + m2) / c ** 5
return beta
def a_of_t(pop, t):
"""
Uses Peters (1964) equation (5.9) for circular binaries to find separation
as a function of time.
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC.
t [array]: time at which to find separation. Must be in Myr.
RETURNS
----------------------
array of separation at time t in solar radii.
"""
t = t * sec_Myr
beta = beta_(pop)
a_i = pop.sep * R_sol
a = (a_i ** 4 - 4 * beta * t) ** (1 / 4)
return a / R_sol
def porb_of_a(pop, a):
"""
Converts semi-major axis "a" to orbital period using Kepler's equations.
INPUTS
----------------------
pop [monkey knowledgeframe]: population from COSMIC.
a [array]: semi-major axis of systems. Must be in solar radii and an array of
the same length as the knowledgeframe pop.
RETURNS
t [array]: orbital period in days.
"""
a = a * R_sol
m1 = pop.mass_1 * M_sol
m2 = pop.mass_2 * M_sol
P_sqrd = 4 * np.pi ** 2 * a ** 3 / G / (m1 + m2)
P = np.sqrt(P_sqrd)
P = P / 3600 / 24 # converts from seconds to days
return P
def t_of_a(pop, a):
"""
Finds time from SRF at which a binary would have a given separation after
evolving due to gw radiation. (Re-arrangement of a_of_t(pop, t)).
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC.
a [array]: separation to find time for. Must be in solar radii.
RETURNS
----------------------
t [array]: time in Myr where DWD reaches separation "a"
"""
beta = beta_(pop)
a_i = pop.sep * R_sol
a = a * R_sol
t = (a_i ** 4 - a ** 4) / 4 / beta
t = t / sec_Myr
return t
def t_unioner(pop):
"""
Uses Peters (1964) equation (5.10) to determine the merger time of a circular
DWD binary from time of SRF.
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC
RETURNS
----------------------
t [array]: time in Myr.
"""
a_0 = pop.sep * R_sol
beta = beta_(pop)
T = a_0 ** 4 / 4 / beta
T = T / sec_Myr # convert from seconds to Myr, as the docstring states
return T
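# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It strings together
# the Peters (1964) helpers defined above, assuming a knowledgeframe with the
# COSMIC-style columns named in their docstrings (mass_1, mass_2 in solar
# masses; sep in solar radii). In compact form the relations implemented are
#     beta = (64/5) * G^3 * m1 * m2 * (m1 + m2) / c^5
#     a(t) = (a_i^4 - 4*beta*t)^(1/4),   T_merge = a_i^4 / (4*beta)
# The helper name and the toy numbers below are mine and purely illustrative.
def _example_peters_evolution():
    toy = mk.KnowledgeFrame({"mass_1": [0.6], "mass_2": [0.4], "sep": [3.0]})
    t_m = t_unioner(toy)                # inspiral time in Myr (~5e4 Myr here)
    sep_now = a_of_t(toy, 100.0)        # separation in R_sun after 100 Myr of GW decay
    porb_now = porb_of_a(toy, sep_now)  # orbital period in days via Kepler's third law
    back = t_of_a(toy, sep_now)         # recovers ~100 Myr by construction
    return t_m, sep_now, porb_now, back
# ---------------------------------------------------------------------------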
def a_of_RLOF(pop):
"""
Finds separation when lower mass WD overflows its
Roche Lobe. Taken from Eq. 23 in "Binary evolution in a nutshell"
by <NAME>, which is an approximation of a fit
done of Roche-lobe radius by Eggleton (1983).
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC
RETURNS
----------------------
a [array]: RLO separations of pop
"""
m1 = pop.mass_1
m2 = pop.mass_2
primary_mass = np.where(m1 > m2, m1, m2)
secondary_mass = np.where(m1 > m2, m2, m1)
secondary_radius = np.where(m1 > m2, pop.rad_2, pop.rad_1)
R2 = secondary_radius
q = secondary_mass / primary_mass
num = 0.49 * q ** (2 / 3)
denom = 0.6 * q ** (2 / 3) + np.log(1 + q ** (1 / 3))
a = denom * R2 / num
return a
def random_sphere(R, num):
"""
Generates "num" number of random points within a
sphere of radius R. It picks random x, y, z values
within a cube and discards it if it's outside the
sphere.
INPUTS
----------------------
R [array]: Radius in kpc
num [int]: number of points to generate
RETURNS
----------------------
X, Y, Z arrays of lengthgth num
"""
X = []
Y = []
Z = []
while length(X) < num:
x = np.random.uniform(-R, R)
y = np.random.uniform(-R, R)
z = np.random.uniform(-R, R)
r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
if r > R:
continue
if r <= R:
X.adding(x)
Y.adding(y)
Z.adding(z)
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
return X, Y, Z
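# Editor's note (not part of the original module): random_sphere uses plain
# rejection sampling from the enclosing cube, so on average only
# pi/6 ~ 52% of draws are kept; for the small kern_length offsets used in
# position() below this is cheap. A quick self-check (helper name is mine):
def _example_random_sphere():
    X, Y, Z = random_sphere(1.0, 100)
    assert np.total_all(X ** 2 + Y ** 2 + Z ** 2 <= 1.0)
    return X, Y, Z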
def rad_WD(M):
"""
Calculates the radius of a WD as a function of mass M in solar masses.
Taken from Eq. 91 in Hurley et al. (2000), from Eq. 17 in Tout et al. (1997)
INPUTS
----------------------
M [array]: masses of the WDs in solar masses
RETURNS
----------------------
rad[array]: radii of the WDs in solar radii
"""
M_ch = 1.44
R_NS = 1.4e-5 * np.ones(length(M))
A = 0.0115 * np.sqrt((M_ch / M) ** (2 / 3) - (M / M_ch) ** (2 / 3))
rad = np.getting_max(np.array([R_NS, A]), axis=0)
return rad
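# Editor's sketch (not part of the original module): a quick sanity check of
# the Tout et al. (1997) mass-radius fit together with the Eggleton-style
# Roche-lobe estimate above. For a 0.6 Msun WD the fit gives roughly
# 0.013 R_sun (~9000 km), and rad_WD only falls back to the neutron-star
# radius as M approaches M_ch = 1.44 Msun. The helper name and the
# 0.6/0.4 Msun pair are mine; quoted values are approximate.
def _example_wd_radii_and_RLO():
    m1, m2 = 0.6, 0.4
    toy = mk.KnowledgeFrame({"mass_1": [m1], "mass_2": [m2], "sep": [3.0],
                             "rad_1": rad_WD(np.array([m1])),
                             "rad_2": rad_WD(np.array([m2]))})
    a_RLO = a_of_RLOF(toy)  # ~0.05 R_sun: RLO sets in only at very short separations
    return toy, a_RLO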
def evolve(pop_init):
"""
Evolve an initial population of binary WD's using
GW radiation.
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle age columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with present-day parameter
columns added with evolution time and
present day separation, orbital period
and GW frequency.
"""
t_evol = pop_init.age * 1000 - pop_init.tphys
sep_f = a_of_t(pop_init, t_evol)
porb_f = porb_of_a(pop_init, sep_f)
f_gw = 2 / (porb_f * 24 * 3600)
pop_init["t_evol"] = t_evol
pop_init["sep_f"] = sep_f
pop_init["porb_f"] = porb_f
pop_init["f_gw"] = f_gw
return pop_init
def position(pop_init):
"""
Assigning random microchanges to positions to
give each system a distinctive position for identical
FIRE star particles
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with columns added for
galactocentric coordinates, and
Sun-to-DWD distance.
"""
R_list = pop_init.kern_length.values
xGx = pop_init.xGx.values.clone()
yGx = pop_init.yGx.values.clone()
zGx = pop_init.zGx.values.clone()
x, y, z = random_sphere(1.0, length(R_list))
X = xGx + (x * R_list)
Y = yGx + (y * R_list)
Z = zGx + (z * R_list)
pop_init["X"] = X
pop_init["Y"] = Y
pop_init["Z"] = Z
pop_init["dist_sun"] = (X ** 2 + (Y - sun_yGx) ** 2 + (Z - sun_zGx) ** 2) ** (1 / 2)
return pop_init
def merging_pop(pop_init):
"""
Identifies DWD systems which will merge before present day,
defined as those in which their delay time is less than their
assigned FIRE star particle age.
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle age columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with merged systems
discarded
pop_unioner [monkey knowledgeframe]: merged population which can be
saved separately
"""
t_m = t_unioner(pop_init)
pop_init["t_delay"] = t_m + pop_init.tphys.values
pop_unioner = pop_init.loc[pop_init.t_delay <= pop_init.age * 1000]
pop_init = pop_init.loc[pop_init.t_delay >= pop_init.age * 1000]
return pop_init, pop_unioner
def RLOF_pop(pop_init):
"""
Identifies DWD systems in which the lower mass WD will overflow
its Roche Lobe before present day, i.e. when the system's RLO time
is less than its assigned FIRE star particle age.
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle age columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with RLO systems
discarded
pop_RLOF [monkey knowledgeframe]: RLO population which can be
saved separately
"""
a_RLOF = a_of_RLOF(pop_init)
t_RLOF = t_of_a(pop_init, a_RLOF)
pop_init["t_RLOF"] = t_RLOF
pop_RLOF = pop_init.loc[t_RLOF + pop_init.tphys <= pop_init.age * 1000]
pop_init = pop_init.loc[t_RLOF + pop_init.tphys >= pop_init.age * 1000]
return pop_init, pop_RLOF
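# Editor's sketch (not part of the original module): the two filters above
# expect COSMIC-style columns plus 'tphys' (DWD formation time in Myr) and
# 'age' (FIRE star-particle age in Gyr), as their docstrings describe. The
# helper name and toy values are mine; a wide 3 R_sun binary formed at
# tphys = 500 Myr in a 10 Gyr star particle survives both cuts.
def _example_population_filters():
    pop = mk.KnowledgeFrame({"mass_1": [0.6], "mass_2": [0.4], "sep": [3.0],
                             "rad_1": rad_WD(np.array([0.6])),
                             "rad_2": rad_WD(np.array([0.4])),
                             "tphys": [500.0], "age": [10.0]})
    pop, pop_unioner = merging_pop(pop)  # drops systems that merge before today
    pop, pop_RLOF = RLOF_pop(pop)        # drops systems overflowing before today
    return pop, pop_unioner, pop_RLOF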
def filter_population(dat):
"""
discards systems which have any of [formation times, delay times, RLOF times]
less than their FIRE age. Evolves the remaining systems to present day. Selects
systems orbiting in the LISA band.
INPUTS
----------------------
dat [list] containing (in order)...
- pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle columns.
- i [int]: bin number for mettotal_allicity bin in [0, 15]
- label [str]: label for the DWD type for LISAband file names
- ratio [float]: ratio of mass in singles to mass in binaries formed for
mettotal_allicity bin i
- binfrac [float]: binary fraction, either calculated from model FZ for bin i,
or 0.5 for model F50
- pathtosave [str]: path to folder for the created files
- interfile [bool]: if True, intermediate files like merging and RLO populations
are saved on top of LISA band files.
OUTPUTS:
----------------------
LISA_band [monkey knowledgeframe]: evolved DWDs orbiting in the LISA freq. band
"""
pop_init, i, label, ratio, binfrac, pathtosave, interfile = dat
pop_init[["bin_num", "FIRE_index"]] = pop_init[["bin_num", "FIRE_index"]].totype(
"int64"
)
if interfile == True:
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_init",
formating="t",
adding=True,
)
# Now that we've obtained an initial population, we make data cuts
# of systems who wouldn't form in time for their FIRE age, or would
# merge or overflow their Roche Lobe before present day.
pop_init = pop_init.loc[pop_init.tphys <= pop_init.age * 1000]
if interfile == True:
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_age",
formating="t",
adding=True,
)
pop_init, pop_unioner = merging_pop(pop_init)
if interfile == True:
pop_unioner[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_unioner",
formating="t",
adding=True,
)
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_nm",
formating="t",
adding=True,
)
pop_unioner = mk.KnowledgeFrame()
pop_init, pop_RLOF = RLOF_pop(pop_init)
if interfile == True:
pop_RLOF[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_RLOF",
formating="t",
adding=True,
)
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_nRLOF",
formating="t",
adding=True,
)
pop_RLOF = mk.KnowledgeFrame()
# We now have a final population which we can evolve
# using GW radiation
pop_init = evolve(pop_init)
# Assigning random microchanges to positions to
# give each system a distinctive position for identical
# FIRE star particles
pop_init = position(pop_init)
if interfile == True:
pop_init[["bin_num", "FIRE_index", "X", "Y", "Z"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_f",
formating="t",
adding=True,
)
if binfrac == 0.5:
binfrac_write = 0.5
else:
binfrac_write = "variable"
# Assigning weights to population to be used for histograms.
# This creates an extra column which states how many times
# a given system was sampled from the cosmic-pop conv kf.
pop_init = pop_init.join(
pop_init.grouper("bin_num")["bin_num"].size(), on="bin_num", rsuffix="_pw"
)
# Systems detectable by LISA will be in the frequency band
# between f_gw's 0.01mHz and 1Hz.
LISA_band = pop_init.loc[(pop_init.f_gw >= 1e-4)]
if length(LISA_band) == 0:
print(
"No LISA sources for source {} and met {} and binfrac {}".formating(
label, met_arr[i + 1], binfrac
)
)
return []
else:
pop_init = mk.KnowledgeFrame()
LISA_band = LISA_band.join(
LISA_band.grouper("bin_num")["bin_num"].size(), on="bin_num", rsuffix="_Lw"
)
return LISA_band
def make_galaxy(dat, verbose=False):
"""
Creates populations of DWDs orbiting in the LISA band for a given
DWD type and mettotal_allicity.
INPUTS:
dat [list] containing (in order)...
- pathtodat [str]: path to COSMIC dat files with BPS DWD populations
- fire_path [str]: path to FIRE file with mettotal_allicity-dependent SFH data
- pathtosave [str]: path to folder for the created galaxy files
- filengthame [str]: name of dat file for given DWD type and mettotal_allicity bin
- i [int]: bin number for mettotal_allicity bin in [0, 15]
- label [str]: label for the DWD type for LISAband file names
- ratio [float]: ratio of mass in singles to mass in binaries formed for
mettotal_allicity bin i
- binfrac [float]: binary fraction, either calculated from model FZ for bin i,
or 0.5 for model F50
- interfile [bool]: if True, intermediate files like merging and RLO populations
are saved on top of LISA band files.
- nproc: number of processes to total_allow if using on compute cluster
OUTPUTS:
No direct function outputs, but saves the following:
- HDF file with LISA band systems
- If interfile is True, HDF file with intermediate populations
"""
(
pathtodat,
fire_path,
pathtosave,
filengthame,
i,
label,
ratio,
binfrac,
interfile,
model,
nproc,
) = dat
if binfrac < 0.5:
var_label = "FZ"
else:
var_label = "F50"
Lkey = "Lband_{}_{}".formating(var_label, model)
Rkey = "rand_seed_{}_{}".formating(var_label, model)
Lsavefile = "Lband_{}_{}_{}_{}.hkf".formating(label, var_label, model, i)
try:
mk.read_hkf(pathtosave + Lsavefile, key=Lkey)
return [], [], []
except:
FIRE = mk.read_hkf(fire_path + "FIRE.h5").sort_the_values("met")
rand_seed = np.random.randint(0, 100, 1)
np.random.seed(rand_seed)
rand_seed = mk.KnowledgeFrame(rand_seed)
rand_seed.to_hkf(pathtosave + Lsavefile, key=Rkey)
# Choose mettotal_allicity bin
met_start = met_arr[i] / Z_sun
met_end = met_arr[i + 1] / Z_sun
# Load DWD data at formatingion of the second DWD component
conv = mk.read_hkf(pathtodat + filengthame, key="conv")
if "bin_num" not in conv.columns:
conv.index = conv.index.renagetting_ming("index")
conv["bin_num"] = conv.index.values
# overwrite COSMIC radii
conv["rad_1"] = rad_WD(conv.mass_1.values)
conv["rad_2"] = rad_WD(conv.mass_2.values)
# Use ratio to scale to astrophysical pop w/ specific binary frac.
try:
mass_binaries = mk.read_hkf(pathtodat + filengthame, key="mass_stars").iloc[-1]
except:
print("m_binaries key")
mass_binaries = mk.read_hkf(pathtodat + filengthame, key="mass_binaries").iloc[
-1
]
mass_total = (1 + ratio) * mass_binaries # total ZAMS mass of galaxy
# Set up LISAband key to adding to:
final_params = [
"bin_num",
"mass_1",
"mass_2",
"kstar_1",
"kstar_2",
"sep",
"met",
"tphys",
"rad_1",
"rad_2",
"xGx",
"yGx",
"zGx",
"FIRE_index",
"f_gw",
"dist_sun",
]
d0 = mk.KnowledgeFrame(columns=final_params)
d0.to_hkf(pathtosave + Lsavefile, key=Lkey, formating="t", adding=True)
# Get DWD formation efficiency and number of binaries per star particle
DWD_per_mass = length(conv) / mass_total
N_astro = DWD_per_mass * M_astro
# Choose FIRE bin based on mettotal_allicity:
FIRE["FIRE_index"] = FIRE.index
if met_end * Z_sun == met_arr[-1]:
FIRE_bin = FIRE.loc[FIRE.met >= met_start]
else:
FIRE_bin = FIRE.loc[(FIRE.met >= met_start) & (FIRE.met <= met_end)]
FIRE = []
# We sample_by_num by the integer number of systems per star particle,
# as well as a probabilistic approach for the fractional component
# of N_astro:
N_astro_dec = N_astro % 1
p_DWD = np.random.rand(length(FIRE_bin))
N_sample_by_num_dec = np.zeros(length(FIRE_bin))
N_sample_by_num_dec[
p_DWD <= N_astro_dec.values
] = 1.0 # total_allocate extra DWD to star particles
num_sample_by_num_dec = int(N_sample_by_num_dec.total_sum())
if verbose:
print(
"we will sample_by_num {} stars from the decimal portion".formating(
num_sample_by_num_dec
)
)
sample_by_num_dec = mk.KnowledgeFrame.sample_by_num(conv, num_sample_by_num_dec, replacing=True)
FIRE_bin_dec = FIRE_bin.loc[N_sample_by_num_dec == 1.0]
params_list = [
"bin_num",
"mass_1",
"mass_2",
"kstar_1",
"kstar_2",
"porb",
"sep",
"met",
"age",
"tphys",
"rad_1",
"rad_2",
"kern_length",
"xGx",
"yGx",
"zGx",
"FIRE_index",
]
pop_init_dec = mk.concating(
[sample_by_num_dec.reseting_index(), FIRE_bin_dec.reseting_index()], axis=1
)
sample_by_num_dec = mk.KnowledgeFrame()
FIRE_bin_dec = mk.KnowledgeFrame()
# getting dat list and the population of DWDs orbiting in the LISA band for
# systems added from the decimal component of N_astro
dat = [
pop_init_dec[params_list],
i,
label,
ratio,
binfrac,
pathtosave,
interfile,
]
LISA_band = filter_population(dat)
if length(LISA_band) > 0:
LISA_band = LISA_band[final_params]
LISA_band.to_hkf(pathtosave + Lsavefile, key=Lkey, formating="t", adding=True)
# now sampling by the integer number of systems per star particle:
N_sample_by_num_int = int(N_astro) * length(FIRE_bin)
if verbose:
print(
"we will sample_by_num {} stars from the integer portion".formating(N_sample_by_num_int)
)
print("gettingting FIRE values")
FIRE_int = mk.KnowledgeFrame(np.repeat(FIRE_bin.values, int(N_astro), axis=0))
FIRE_int.columns = FIRE_bin.columns
FIRE_bin = mk.KnowledgeFrame()
# if the number of populations to be sampled is large, we create galaxies iteratively
# by looping through.
Nsamp_split = 5e6
if N_sample_by_num_int < Nsamp_split:
sample_by_num_int = | mk.KnowledgeFrame.sample_by_num(conv, N_sample_by_num_int, replacing=True) | pandas.DataFrame.sample |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = | algos.incontain(arr, arr[0:2]) | pandas.core.algorithms.isin |
"""
Module contains tools for processing files into KnowledgeFrames or other objects
"""
from collections import abc, defaultdict
import csv
import datetime
from io import StringIO
import itertools
import re
import sys
from textwrap import fill
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Type,
cast,
)
import warnings
import numpy as np
import monkey._libs.lib as lib
import monkey._libs.ops as libops
import monkey._libs.parsers as parsers
from monkey._libs.parsers import STR_NA_VALUES
from monkey._libs.tslibs import parsing
from monkey._typing import FilePathOrBuffer, StorageOptions, Union
from monkey.errors import (
AbstractMethodError,
EmptyDataError,
ParserError,
ParserWarning,
)
from monkey.util._decorators import Appender
from monkey.core.dtypes.cast import totype_nansafe
from monkey.core.dtypes.common import (
ensure_object,
ensure_str,
is_bool_dtype,
is_categorical_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_file_like,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
monkey_dtype,
)
from monkey.core.dtypes.dtypes import CategoricalDtype
from monkey.core.dtypes.missing import ifna
from monkey.core import algorithms, generic
from monkey.core.arrays import Categorical
from monkey.core.frame import KnowledgeFrame
from monkey.core.indexes.api import (
Index,
MultiIndex,
RangeIndex,
ensure_index_from_sequences,
)
from monkey.core.collections import Collections
from monkey.core.tools import datetimes as tools
from monkey.io.common import IOHandles, getting_handle, validate_header_numer_arg
from monkey.io.date_converters import generic_parser
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = "\ufeff"
_doc_read_csv_and_table = (
r"""
{total_summary}
Also supports optiontotal_ally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://monkey.pydata.org/monkey-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, monkey accepts whatever ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
Delimiter to use. If sep is None, the C engine cannot automatictotal_ally detect
the separator, but the Python parsing engine can, averageing the latter will
be used and automatictotal_ally detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header_numer : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header_numer=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header_numer=None``. Explicitly pass ``header_numer=0`` to be able to
replacing existing names. The header_numer can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header_numer=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header_numer row,
then you should explicitly pass ``header_numer=0`` to override the column names.
Duplicates in this list are not total_allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``KnowledgeFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force monkey to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or ctotal_allable, optional
Return a subset of the columns. If list-like, total_all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header_numer row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a KnowledgeFrame from ``data`` with element order preserved use
``mk.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``mk.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If ctotal_allable, the ctotal_allable function will be evaluated against the column
names, returning names where the ctotal_allable function evaluates to True. An
example of a valid ctotal_allable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Collections.
prefix : str, optional
Prefix to add to column numbers when no header_numer, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` togettingher with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {{'c', 'python'}}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or ctotal_allable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If ctotal_allable, the ctotal_allable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid ctotal_allable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is addinged to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without whatever NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and ctotal_all
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparsable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``mk.convert_datetime`` after
``mk.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partitotal_ally-applied
:func:`monkey.convert_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatingted dates.
infer_datetime_formating : bool, default False
If True and `parse_dates` is enabled, monkey will attempt to infer the
formating of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Monkey will try to ctotal_all `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatingenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) ctotal_all `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM formating dates, international and European formating.
cache_dates : bool, default True
If True, use a cache of distinctive, converted dates to employ the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especitotal_ally ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or gettingting chunks with
``getting_chunk()``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://monkey.pydata.org/monkey-docs/stable/io.html#io-chunking>`_
for more informatingion on ``iterator`` and ``chunksize``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
linetergetting_minator : str (lengthgth 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (lengthgth 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (lengthgth 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogettingher. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header_numer` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header_numer=0`` will result in 'a,b,c' being
treated as the header_numer.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more definal_item_tails.
error_bad_lines : bool, default True
Lines with too mwhatever fields (e.g. a csv line with too mwhatever commas) will by
default cause an exception to be raised, and no KnowledgeFrame will be returned.
    If False, then these "bad lines" will be sipped from the KnowledgeFrame that is
returned.
warn_bad_lines : bool, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalengtht to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Interntotal_ally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single KnowledgeFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_mapping : bool, default False
If a filepath is provided for `filepath_or_buffer`, mapping the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer whatever I/O overheader_num.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or 'high' for the ordinary converter,
'legacy' for the original lower precision monkey converter, and
'value_round_trip' for the value_round-trip converter.
.. versionchanged:: 1.2
{storage_options}
.. versionadded:: 1.2
Returns
-------
KnowledgeFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
KnowledgeFrame.to_csv : Write KnowledgeFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into KnowledgeFrame.
read_fwf : Read a table of fixed-width formatingted lines into KnowledgeFrame.
Examples
--------
>>> mk.{func_name}('data.csv') # doctest: +SKIP
"""
)
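# Editor's note: the docstring template above is dense, so here is a small,
# hedged usage sketch (not part of the original module; "data.csv" is a
# hypothetical file). It only restates behaviors already documented above.
#
#   import monkey as mk
#
#   # `usecols` ignores element order; index afterwards to fix the order:
#   kf = mk.read_csv("data.csv", usecols=["foo", "bar"])[["bar", "foo"]]
#
#   # `parse_dates` accepts a bool, a list of columns, a list of lists
#   # (columns to combine), or a dict naming the combined result:
#   kf = mk.read_csv("data.csv", parse_dates={"foo": [1, 3]})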
def validate_integer(name, val, getting_min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
getting_min_val : int
Minimum total_allowed value (val < getting_min_val will result in a ValueError)
"""
msg = f"'{name:s}' must be an integer >={getting_min_val:d}"
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= getting_min_val):
raise ValueError(msg)
return val
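# Illustrative sketch (editor's addition): the validate_integer contract above,
# shown doctest-style. Integral floats are cast safely; non-integral values or
# integers below `getting_min_val` raise ValueError, and None passes through.
#
#   >>> validate_integer("chunksize", 5)                    # -> 5
#   >>> validate_integer("chunksize", 5.0)                  # -> 5 (safe cast)
#   >>> validate_integer("chunksize", 5.5)                  # ValueError
#   >>> validate_integer("chunksize", 0, getting_min_val=1)  # ValueError
#   >>> validate_integer("chunksize", None)                 # -> None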
def _validate_names(names):
"""
Raise ValueError if the `names` parameter contains duplicates or has an
invalid data type.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output KnowledgeFrame.
Raises
------
ValueError
If names are not distinctive or are not ordered (e.g. set).
"""
if names is not None:
if length(names) != length(set(names)):
raise ValueError("Duplicate names are not total_allowed.")
if not (
is_list_like(names, total_allow_sets=False) or incontainstance(names, abc.KeysView)
):
raise ValueError("Names should be an ordered collection.")
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
if kwds.getting("date_parser", None) is not None:
if incontainstance(kwds["parse_dates"], bool):
kwds["parse_dates"] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.getting("iterator", False)
chunksize = validate_integer("chunksize", kwds.getting("chunksize", None), 1)
nrows = kwds.getting("nrows", None)
# Check for duplicates in names.
_validate_names(kwds.getting("names", None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
with parser:
return parser.read(nrows)
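# Editor's sketch (hedged): depending on `kwds`, _read either hands back the
# TextFileReader itself (when "chunksize" or "iterator" is set, so the caller
# drives iteration) or eagerly reads up to `nrows` rows and returns a
# KnowledgeFrame. Schematically:
#
#   _read(path, {**defaults, "iterator": True})   # -> TextFileReader
#   _read(path, {**defaults, "nrows": 10})        # -> KnowledgeFrame
#
# (`path` and `defaults` are placeholders; the real callers are read_csv,
# read_table and read_fwf below, which assemble `kwds` from their signatures.)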
_parser_defaults = {
"delimiter": None,
"escapechar": None,
"quotechar": '"',
"quoting": csv.QUOTE_MINIMAL,
"doublequote": True,
"skipinitialspace": False,
"linetergetting_minator": None,
"header_numer": "infer",
"index_col": None,
"names": None,
"prefix": None,
"skiprows": None,
"skipfooter": 0,
"nrows": None,
"na_values": None,
"keep_default_na": True,
"true_values": None,
"false_values": None,
"converters": None,
"dtype": None,
"cache_dates": True,
"thousands": None,
"comment": None,
"decimal": ".",
# 'engine': 'c',
"parse_dates": False,
"keep_date_col": False,
"dayfirst": False,
"date_parser": None,
"usecols": None,
# 'iterator': False,
"chunksize": None,
"verbose": False,
"encoding": None,
"squeeze": False,
"compression": None,
"mangle_dupe_cols": True,
"infer_datetime_formating": False,
"skip_blank_lines": True,
}
_c_parser_defaults = {
"delim_whitespace": False,
"na_filter": True,
"low_memory": True,
"memory_mapping": False,
"error_bad_lines": True,
"warn_bad_lines": True,
"float_precision": None,
}
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_deprecated_defaults: Dict[str, Any] = {}
_deprecated_args: Set[str] = set()
@Appender(
_doc_read_csv_and_table.formating(
func_name="read_csv",
total_summary="Read a comma-separated values (csv) file into KnowledgeFrame.",
_default_sep="','",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_csv(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header_numer="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_formating=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
linetergetting_minator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_mapping=False,
float_precision=None,
storage_options: StorageOptions = None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": ","}
)
kwds.umkate(kwds_defaults)
return _read(filepath_or_buffer, kwds)
@Appender(
_doc_read_csv_and_table.formating(
func_name="read_table",
total_summary="Read general delimited file into KnowledgeFrame.",
_default_sep=r"'\\t' (tab-stop)",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_table(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header_numer="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_formating=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
linetergetting_minator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_mapping=False,
float_precision=None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": "\t"}
)
kwds.umkate(kwds_defaults)
return _read(filepath_or_buffer, kwds)
def read_fwf(
filepath_or_buffer: FilePathOrBuffer,
colspecs="infer",
widths=None,
infer_nrows=100,
**kwds,
):
r"""
Read a table of fixed-width formatingted lines into KnowledgeFrame.
Also supports optiontotal_ally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<https://monkey.pydata.org/monkey-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
If you want to pass in a path object, monkey accepts whatever
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser detergetting_mine the
`colspecs`.
.. versionadded:: 0.24.0
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
KnowledgeFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
KnowledgeFrame.to_csv : Write KnowledgeFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into KnowledgeFrame.
Examples
--------
>>> mk.read_fwf('data.csv') # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, "infer") and widths is not None:
raise ValueError("You must specify only one of 'widths' and 'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.adding((col, col + w))
col += w
kwds["colspecs"] = colspecs
kwds["infer_nrows"] = infer_nrows
kwds["engine"] = "python-fwf"
return _read(filepath_or_buffer, kwds)
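# Editor's sketch: how `widths` is translated to `colspecs` in read_fwf above.
# Contiguous field widths become half-open [start, stop) intervals:
#
#   widths = [3, 5, 2]  ->  colspecs = [(0, 3), (3, 8), (8, 10)]
#
# so read_fwf("data.fwf", widths=[3, 5, 2]) is equivalent to
# read_fwf("data.fwf", colspecs=[(0, 3), (3, 8), (8, 10)])
# ("data.fwf" is a hypothetical file name).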
class TextFileReader(abc.Iterator):
"""
Passed dialect overrides whatever of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = "python"
engine_specified = False
self.engine = engine
self._engine_specified = kwds.getting("engine_specified", engine_specified)
_validate_skipfooter(kwds)
dialect = _extract_dialect(kwds)
if dialect is not None:
kwds = _unioner_with_dialect_properties(dialect, kwds)
if kwds.getting("header_numer", "infer") == "infer":
kwds["header_numer"] = 0 if kwds.getting("names") is None else None
self.orig_options = kwds
# miscellanea
self._currow = 0
options = self._getting_options_with_defaults(engine)
options["storage_options"] = kwds.getting("storage_options", None)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
self.squeeze = options.pop("squeeze", False)
self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if "has_index_names" in kwds:
self.options["has_index_names"] = kwds["has_index_names"]
self._engine = self._make_engine(self.engine)
def close(self):
self._engine.close()
def _getting_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in _parser_defaults.items():
value = kwds.getting(argname, default)
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
if engine != "c" and value != default:
if "python" in engine and argname not in _python_unsupported:
pass
elif value == _deprecated_defaults.getting(argname, default):
pass
else:
raise ValueError(
f"The {repr(argname)} option is not supported with the "
f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.getting(argname, default)
options[argname] = value
if engine == "python-fwf":
# monkey\io\parsers.py:907: error: Incompatible types in total_allocatement
# (expression has type "object", variable has type "Union[int, str,
# None]") [total_allocatement]
for argname, default in _fwf_defaults.items(): # type: ignore[total_allocatement]
options[argname] = kwds.getting(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
# The C engine doesn't need the file-like to have the "__next__"
# attribute. However, the Python engine explicitly ctotal_alls
# "__next__(...)" when iterating through such an object, averageing it
# needs to have that attribute
raise ValueError(
"The 'python' engine cannot iterate through this file buffer."
)
def _clean_options(self, options, engine):
result = options.clone()
ftotal_allback_reason = None
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
ftotal_allback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
sep = options["delimiter"]
delim_whitespace = options["delim_whitespace"]
if sep is None and not delim_whitespace:
if engine == "c":
ftotal_allback_reason = (
"the 'c' engine does not support "
"sep=None with delim_whitespace=False"
)
engine = "python"
elif sep is not None and length(sep) > 1:
if engine == "c" and sep == r"\s+":
result["delim_whitespace"] = True
del result["delimiter"]
elif engine not in ("python", "python-fwf"):
# wait until regex engine integrated
ftotal_allback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
if "python" in engine:
result["delimiter"] = r"\s+"
elif sep is not None:
encodeable = True
encoding = sys.gettingfilesystemencoding() or "utf-8"
try:
if length(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ("python", "python-fwf"):
ftotal_allback_reason = (
f"the separator encoded in {encoding} "
"is > 1 char long, and the 'c' engine "
"does not support such separators"
)
engine = "python"
quotechar = options["quotechar"]
if quotechar is not None and incontainstance(quotechar, (str, bytes)):
if (
length(quotechar) == 1
and ord(quotechar) > 127
and engine not in ("python", "python-fwf")
):
ftotal_allback_reason = (
"ord(quotechar) > 127, averageing the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support such quotechars"
)
engine = "python"
if ftotal_allback_reason and self._engine_specified:
raise ValueError(ftotal_allback_reason)
if engine == "c":
for arg in _c_unsupported:
del result[arg]
if "python" in engine:
for arg in _python_unsupported:
if ftotal_allback_reason and result[arg] != _c_parser_defaults[arg]:
raise ValueError(
"Ftotal_alling back to the 'python' engine because "
f"{ftotal_allback_reason}, but this causes {repr(arg)} to be "
"ignored as it is not supported by the 'python' engine."
)
del result[arg]
if ftotal_allback_reason:
warnings.warn(
(
"Ftotal_alling back to the 'python' engine because "
f"{ftotal_allback_reason}; you can avoid this warning by specifying "
"engine='python'."
),
ParserWarning,
stacklevel=5,
)
index_col = options["index_col"]
names = options["names"]
converters = options["converters"]
na_values = options["na_values"]
skiprows = options["skiprows"]
validate_header_numer_arg(options["header_numer"])
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
if result.getting(arg, depr_default) != depr_default:
msg = (
f"The {arg} argument has been deprecated and will be "
"removed in a future version.\n\n"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
result[arg] = parser_default
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not incontainstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result["index_col"] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not incontainstance(converters, dict):
raise TypeError(
"Type converters must be a dict or subclass, "
f"input was a {type(converters).__name__}"
)
else:
converters = {}
# Converting values to NA
keep_default_na = options["keep_default_na"]
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is interntotal_ally handled by the
# c-engine, so only need for python parsers
if engine != "c":
if is_integer(skiprows):
skiprows = list(range(skiprows))
if skiprows is None:
skiprows = set()
elif not ctotal_allable(skiprows):
skiprows = set(skiprows)
# put stuff back
result["names"] = names
result["converters"] = converters
result["na_values"] = na_values
result["na_fvalues"] = na_fvalues
result["skiprows"] = skiprows
return result, engine
def __next__(self):
try:
return self.getting_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine="c"):
mappingping: Dict[str, Type[ParserBase]] = {
"c": CParserWrapper,
"python": PythonParser,
"python-fwf": FixedWidthFieldParser,
}
if engine not in mappingping:
raise ValueError(
f"Unknown engine: {engine} (valid options are {mappingping.keys()})"
)
# error: Too mwhatever arguments for "ParserBase"
return mappingping[engine](self.f, **self.options) # type: ignore[ctotal_all-arg]
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
nrows = validate_integer("nrows", nrows)
index, columns, col_dict = self._engine.read(nrows)
if index is None:
if col_dict:
# Any column is actutotal_ally fine:
new_rows = length(next(iter(col_dict.values())))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = length(index)
kf = KnowledgeFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and length(kf.columns) == 1:
return kf[kf.columns[0]].clone()
return kf
def getting_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = getting_min(size, self.nrows - self._currow)
return self.read(nrows=size)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
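# Editor's sketch (hedged): TextFileReader is both an iterator and a context
# manager, so the chunked reading documented in the read_csv docstring
# typically looks like:
#
#   with mk.read_csv("data.csv", chunksize=1000) as reader:
#       for chunk in reader:        # each chunk is a KnowledgeFrame
#           process(chunk)          # `process` is a hypothetical callback
#
# read_csv returns this TextFileReader (instead of a KnowledgeFrame) whenever
# `chunksize` or `iterator=True` is passed.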
def _is_index_col(col):
return col is not None and col is not False
def _is_potential_multi_index(
columns, index_col: Optional[Union[bool, Sequence[int]]] = None
):
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
index_col : None, bool or list, optional
Column or columns to use as the (possibly hierarchical) index
Returns
-------
boolean : Whether or not columns could become a MultiIndex
"""
if index_col is None or incontainstance(index_col, bool):
index_col = []
return (
length(columns)
and not incontainstance(columns, MultiIndex)
and total_all(incontainstance(c, tuple) for c in columns if c not in list(index_col))
)
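# Editor's sketch: _is_potential_multi_index returns True only when every
# non-index column label is a tuple, e.g.
#
#   _is_potential_multi_index([("a", "x"), ("a", "y")])                 # -> True
#   _is_potential_multi_index(["a", ("a", "y")])                        # -> False
#   _is_potential_multi_index([("a", "x"), "idx"], index_col=["idx"])   # -> True
#
# (the labels shown are illustrative; the helper is called on parsed headers.)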
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a ctotal_allable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a ctotal_allable, returns 'usecols'.
"""
if ctotal_allable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
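# Editor's sketch: with a callable, _evaluate_usecols maps column names to the
# set of positions the callable accepts; any other value passes through as-is.
#
#   _evaluate_usecols(lambda c: c.startswith("b"), ["a", "b", "bar"])  # -> {1, 2}
#   _evaluate_usecols(["a", "b"], ["a", "b", "bar"])                   # -> ["a", "b"]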
def _validate_usecols_names(usecols, names):
"""
Validates that total_all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if length(missing) > 0:
raise ValueError(
f"Usecols do not match columns, columns expected but not found: {missing}"
)
return usecols
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
return skipfooter
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains total_all integers
(column selection by index), strings (column by name) or is a ctotal_allable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, ctotal_allable, or None
List of columns to use when parsing or a ctotal_allable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a ctotal_allable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a ctotal_allable or None is passed in.
"""
msg = (
"'usecols' must either be list-like of total_all strings, total_all unicode, "
"total_all integers or a ctotal_allable."
)
if usecols is not None:
if ctotal_allable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
# Ensure it is iterable container but not string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a ValueError if
that is the case.
"""
msg = (
"Only booleans, lists, and dictionaries are accepted "
"for the 'parse_dates' parameter"
)
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not incontainstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
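# Editor's sketch of the accepted `parse_dates` shapes (restating the read_csv
# docstring template above):
#
#   parse_dates=True                   # parse the index
#   parse_dates=[1, 2] or ["date"]     # parse the listed columns separately
#   parse_dates=[[1, 3]]               # combine columns 1 and 3 into one column
#   parse_dates={"foo": [1, 3]}        # combine and name the result "foo"
#
# Scalars other than booleans (e.g. parse_dates="date") raise TypeError here.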
class ParserBase:
def __init__(self, kwds):
self.names = kwds.getting("names")
self.orig_names: Optional[List] = None
self.prefix = kwds.pop("prefix", None)
self.index_col = kwds.getting("index_col", None)
self.unnamed_cols: Set = set()
self.index_names: Optional[List] = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
self.date_parser = kwds.pop("date_parser", None)
self.dayfirst = kwds.pop("dayfirst", False)
self.keep_date_col = kwds.pop("keep_date_col", False)
self.na_values = kwds.getting("na_values")
self.na_fvalues = kwds.getting("na_fvalues")
self.na_filter = kwds.getting("na_filter", False)
self.keep_default_na = kwds.getting("keep_default_na", True)
self.true_values = kwds.getting("true_values")
self.false_values = kwds.getting("false_values")
self.mangle_dupe_cols = kwds.getting("mangle_dupe_cols", True)
self.infer_datetime_formating = kwds.pop("infer_datetime_formating", False)
self.cache_dates = kwds.pop("cache_dates", True)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_formating=self.infer_datetime_formating,
cache_dates=self.cache_dates,
)
# validate header_numer options for mi
self.header_numer = kwds.getting("header_numer")
if incontainstance(self.header_numer, (list, tuple, np.ndarray)):
if not total_all(mapping(is_integer, self.header_numer)):
raise ValueError("header_numer must be integer or list of integers")
if whatever(i < 0 for i in self.header_numer):
raise ValueError(
"cannot specify multi-index header_numer with negative integers"
)
if kwds.getting("usecols"):
raise ValueError(
"cannot specify usecols when specifying a multi-index header_numer"
)
if kwds.getting("names"):
raise ValueError(
"cannot specify names when specifying a multi-index header_numer"
)
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = incontainstance(self.index_col, (list, tuple, np.ndarray))
if not (
is_sequence
and total_all(mapping(is_integer, self.index_col))
or is_integer(self.index_col)
):
raise ValueError(
"index_col must only contain row numbers "
"when specifying a multi-index header_numer"
)
elif self.header_numer is not None:
# GH 27394
if self.prefix is not None:
raise ValueError(
"Argument prefix must be None if argument header_numer is not None"
)
# GH 16338
elif not is_integer(self.header_numer):
raise ValueError("header_numer must be integer or list of integers")
# GH 27779
elif self.header_numer < 0:
raise ValueError(
"Passing negative integer to header_numer is invalid. "
"For no header_numer, use header_numer=None instead"
)
self._name_processed = False
self._first_chunk = True
self.handles: Optional[IOHandles] = None
def _open_handles(self, src: FilePathOrBuffer, kwds: Dict[str, Any]) -> None:
"""
        Let the readers open IOHandles after they are done with their potential raises.
"""
self.handles = getting_handle(
src,
"r",
encoding=kwds.getting("encoding", None),
compression=kwds.getting("compression", None),
memory_mapping=kwds.getting("memory_mapping", False),
storage_options=kwds.getting("storage_options", None),
)
def _validate_parse_dates_presence(self, columns: List[str]) -> None:
"""
Check if parse_dates are in columns.
If user has provided names for parse_dates, check if those columns
are available.
Parameters
----------
columns : list
List of names of the knowledgeframe.
Raises
------
ValueError
If column to parse_date is not in knowledgeframe.
"""
cols_needed: Iterable
if is_dict_like(self.parse_dates):
cols_needed = itertools.chain(*self.parse_dates.values())
elif is_list_like(self.parse_dates):
# a column in parse_dates could be represented
# ColReference = Union[int, str]
# DateGroups = List[ColReference]
# ParseDates = Union[DateGroups, List[DateGroups],
# Dict[ColReference, DateGroups]]
cols_needed = itertools.chain.from_iterable(
col if is_list_like(col) else [col] for col in self.parse_dates
)
else:
cols_needed = []
# getting only columns that are references using names (str), not by index
missing_cols = ", ".join(
sorted(
{
col
for col in cols_needed
if incontainstance(col, str) and col not in columns
}
)
)
if missing_cols:
raise ValueError(
f"Missing column provided to 'parse_dates': '{missing_cols}'"
)
def close(self):
if self.handles is not None:
self.handles.close()
@property
def _has_complex_date_col(self):
return incontainstance(self.parse_dates, dict) or (
incontainstance(self.parse_dates, list)
and length(self.parse_dates) > 0
and incontainstance(self.parse_dates[0], list)
)
def _should_parse_dates(self, i):
if incontainstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = self.index_col[i]
if is_scalar(self.parse_dates):
return (j == self.parse_dates) or (
name is not None and name == self.parse_dates
)
else:
return (j in self.parse_dates) or (
name is not None and name in self.parse_dates
)
def _extract_multi_indexer_columns(
self, header_numer, index_names, col_names, passed_names=False
):
"""
extract and return the names, index_names, col_names
header_numer is a list-of-lists returned from the parsers
"""
if length(header_numer) < 2:
return header_numer[0], index_names, col_names, passed_names
# the names are the tuples of the header_numer that are not the index cols
# 0 is the name of the index, astotal_sugetting_ming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not incontainstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header_numer.pop(-1)
index_names, names, index_col = _clean_index_names(
index_names, self.index_col, self.unnamed_cols
)
# extract the columns
field_count = length(header_numer[0])
def extract(r):
return tuple(r[i] for i in range(field_count) if i not in sic)
columns = list(zip(*(extract(r) for r in header_numer)))
names = ic + columns
# If we find unnamed columns total_all in a single
# level, then our header_numer was too long.
for n in range(length(columns[0])):
if total_all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
header_numer = ",".join(str(x) for x in self.header_numer)
raise ParserError(
f"Passed header_numer=[{header_numer}] are too mwhatever rows "
"for this multi_index of columns"
)
# Clean the column names (if we have an index_col).
if length(ic):
col_names = [
r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None
for r in header_numer
]
else:
col_names = [None] * length(header_numer)
passed_names = True
return names, index_names, col_names, passed_names
def _maybe_dedup_names(self, names):
# see gh-7160 and gh-9424: this helps to provide
# immediate total_alleviation of the duplicate names
# issue and appears to be satisfactory to users,
# but ultimately, not needing to butcher the names
# would be nice!
if self.mangle_dupe_cols:
names = list(names) # so we can index
# monkey\io\parsers.py:1559: error: Need type annotation for
# 'counts' [var-annotated]
counts = defaultdict(int) # type: ignore[var-annotated]
is_potential_mi = _is_potential_multi_index(names, self.index_col)
for i, col in enumerate(names):
cur_count = counts[col]
while cur_count > 0:
counts[col] = cur_count + 1
if is_potential_mi:
col = col[:-1] + (f"{col[-1]}.{cur_count}",)
else:
col = f"{col}.{cur_count}"
cur_count = counts[col]
names[i] = col
counts[col] = cur_count + 1
return names
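    # Editor's sketch: with mangle_dupe_cols=True (the default), duplicate
    # header names are disambiguated in order of appearance, e.g.
    #
    #   ["X", "X", "X"]           ->  ["X", "X.1", "X.2"]
    #   [("a", "x"), ("a", "x")]  ->  [("a", "x"), ("a", "x.1")]   # potential MultiIndex
    #
    # matching the mangle_dupe_cols behavior documented in the read_csv docstring.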
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if _is_potential_multi_index(columns):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, total_alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or not self.index_col:
index = None
elif not self._has_complex_date_col:
index = self._getting_simple_index(total_alldata, columns)
index = self._agg_index(index)
elif self._has_complex_date_col:
if not self._name_processed:
(self.index_names, _, self.index_col) = _clean_index_names(
list(columns), self.index_col, self.unnamed_cols
)
self._name_processed = True
index = self._getting_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
# add names for the index
if indexnamerow:
coffset = length(indexnamerow) - length(columns)
# monkey\io\parsers.py:1604: error: Item "None" of "Optional[Any]"
# has no attribute "set_names" [union-attr]
index = index.set_names(indexnamerow[:coffset]) # type: ignore[union-attr]
# maybe create a mi on the columns
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns
_implicit_index = False
def _getting_simple_index(self, data, columns):
def ix(col):
if not incontainstance(col, str):
return col
raise ValueError(f"Index {col} invalid")
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.adding(i)
index.adding(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in sorted(to_remove, reverse=True):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
def _getting_complex_date_index(self, data, col_names):
def _getting_name(icol):
if incontainstance(icol, str):
return icol
if col_names is None:
raise ValueError(f"Must supply column order to use {icol!s} as index")
for i, c in enumerate(col_names):
if i == icol:
return c
to_remove = []
index = []
for idx in self.index_col:
name = _getting_name(idx)
to_remove.adding(name)
index.adding(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in sorted(to_remove, reverse=True):
data.pop(c)
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True) -> Index:
arrays = []
for i, arr in enumerate(index):
if try_parse_dates and self._should_parse_dates(i):
arr = self._date_conv(arr)
if self.na_filter:
col_na_values = self.na_values
col_na_fvalues = self.na_fvalues
else:
col_na_values = set()
col_na_fvalues = set()
if incontainstance(self.na_values, dict):
# monkey\io\parsers.py:1678: error: Value of type
# "Optional[Any]" is not indexable [index]
col_name = self.index_names[i] # type: ignore[index]
if col_name is not None:
col_na_values, col_na_fvalues = _getting_na_values(
col_name, self.na_values, self.na_fvalues, self.keep_default_na
)
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.adding(arr)
names = self.index_names
index = ensure_index_from_sequences(arrays, names)
return index
def _convert_to_ndarrays(
self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None
):
result = {}
for c, values in dct.items():
conv_f = None if converters is None else converters.getting(c, None)
if incontainstance(dtypes, dict):
cast_type = dtypes.getting(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _getting_na_values(
c, na_values, na_fvalues, self.keep_default_na
)
else:
col_na_values, col_na_fvalues = set(), set()
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(
(
"Both a converter and dtype were specified "
f"for column {c} - only the converter will be used"
),
ParserWarning,
stacklevel=7,
)
try:
values = lib.mapping_infer(values, conv_f)
except ValueError:
mask = algorithms.incontain(values, list(na_values)).view(np.uint8)
values = lib.mapping_infer_mask(values, conv_f, mask)
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues, try_num_bool=False
)
else:
is_ea = is_extension_array_dtype(cast_type)
is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type)
# skip inference if specified dtype is object
# or casting to an EA
try_num_bool = not (cast_type and is_str_or_ea_dtype)
# general type inference and conversion
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues, try_num_bool
)
# type specified in dtype param or cast_type is an EA
if cast_type and (
not is_dtype_equal(cvals, cast_type)
or is_extension_array_dtype(cast_type)
):
if not is_ea and na_count > 0:
try:
if is_bool_dtype(cast_type):
raise ValueError(
f"Bool column has NA values in column {c}"
)
except (AttributeError, TypeError):
# invalid input to is_bool_dtype
pass
cvals = self._cast_types(cvals, cast_type, c)
result[c] = cvals
if verbose and na_count:
print(f"Filled {na_count} NA values in column {c!s}")
return result
def _infer_types(self, values, na_values, try_num_bool=True):
"""
Infer types of values, possibly casting
Parameters
----------
values : ndarray
na_values : set
try_num_bool : bool, default try
try to cast values to numeric (first preference) or boolean
Returns
-------
converted : ndarray
na_count : int
"""
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = algorithms.incontain(values, list(na_values))
na_count = mask.total_sum()
if na_count > 0:
if is_integer_dtype(values):
values = values.totype(np.float64)
np.putmask(values, mask, np.nan)
return values, na_count
if try_num_bool and is_object_dtype(values.dtype):
# exclude e.g DatetimeIndex here
try:
result = lib.maybe_convert_numeric(values, na_values, False)
except (ValueError, TypeError):
# e.g. encountering datetime string gettings ValueError
# TypeError can be raised in floatify
result = values
na_count = parsers.sanitize_objects(result, na_values, False)
else:
na_count = ifna(result).total_sum()
else:
result = values
if values.dtype == np.object_:
na_count = parsers.sanitize_objects(values, na_values, False)
if result.dtype == np.object_ and try_num_bool:
result = libops.maybe_convert_bool(
np.asarray(values),
true_values=self.true_values,
false_values=self.false_values,
)
return result, na_count
def _cast_types(self, values, cast_type, column):
"""
Cast values to specified type
Parameters
----------
values : ndarray
cast_type : string or np.dtype
dtype to cast values to
column : string
column name - used only for error reporting
Returns
-------
converted : ndarray
"""
if is_categorical_dtype(cast_type):
known_cats = (
incontainstance(cast_type, CategoricalDtype)
and cast_type.categories is not None
)
if not is_object_dtype(values) and not known_cats:
# TODO: this is for consistency with
# c-parser which parses total_all categories
# as strings
values = totype_nansafe(values, str)
cats = Index(values).distinctive().sipna()
values = Categorical._from_inferred_categories(
cats, cats.getting_indexer(values), cast_type, true_values=self.true_values
)
# use the EA's implementation of casting
elif is_extension_array_dtype(cast_type):
# ensure cast_type is an actual dtype and not a string
cast_type = monkey_dtype(cast_type)
array_type = cast_type.construct_array_type()
try:
return array_type._from_sequence_of_strings(values, dtype=cast_type)
except NotImplementedError as err:
raise NotImplementedError(
f"Extension Array: {array_type} must implement "
"_from_sequence_of_strings in order to be used in parser methods"
) from err
else:
try:
values = totype_nansafe(values, cast_type, clone=True, skipna=True)  # api: pandas.core.dtypes.cast.astype_nansafe
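# Illustrative sketch (not part of the parser): the converter-vs-dtype precedence
# warned about at the top of this block, seen through the public csv reader. It
# assumes the reader keeps the upstream `converters=` / `dtype=` keywords.
import io
import monkey as mk
buf = io.StringIO("c\n1\n2\n")
kf = mk.read_csv(buf, converters={"c": lambda v: int(v) * 10}, dtype={"c": "float64"})
# A ParserWarning is emitted ("only the converter will be used"); kf["c"] holds
# 10 and 20 produced by the converter rather than values cast to float64.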
from __future__ import annotations
from datetime import timedelta
import operator
from sys import gettingsizeof
from typing import (
TYPE_CHECKING,
Any,
Ctotal_allable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from monkey._libs import index as libindex
from monkey._libs.lib import no_default
from monkey._typing import Dtype
from monkey.compat.numpy import function as nv
from monkey.util._decorators import (
cache_readonly,
doc,
)
from monkey.util._exceptions import rewrite_exception
from monkey.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from monkey.core.dtypes.generic import ABCTimedeltaIndex
from monkey.core import ops
import monkey.core.common as com
from monkey.core.construction import extract_array
import monkey.core.indexes.base as ibase
from monkey.core.indexes.base import maybe_extract_name
from monkey.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from monkey.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from monkey import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by KnowledgeFrame and Collections when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
clone : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base monkey Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
clone: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if incontainstance(start, RangeIndex):
return start.clone(name=name)
elif incontainstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if com.total_all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be ctotal_alled with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(
cls, data: range, name=None, dtype: Dtype | None = None
) -> RangeIndex:
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not incontainstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be ctotal_alled with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
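# Illustrative sketch (not part of this module, to be run after the class is
# defined): the constructor behaviour described above. A single integer argument
# is interpreted as "stop", and plain range objects round-trip through from_range.
idx = RangeIndex(5)                            # RangeIndex(start=0, stop=5, step=1)
idx2 = RangeIndex.from_range(range(2, 10, 2))
list(idx2)                                     # [2, 4, 6, 8]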
@classmethod
def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
result = object.__new__(cls)
assert incontainstance(values, range)
result._range = values
result._name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self) -> type[Int64Index]:
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self) -> np.ndarray:
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _cached_int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
@property
def _int64index(self) -> Int64Index:
# wrap _cached_int64index so we can be sure its name matches self.name
res = self._cached_int64index
res._name = self._name
return res
def _getting_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._getting_attributes_dict()
d.umkate(dict(self._getting_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _formating_attrs(self):
"""
Return a list of tuples of the (attr, formatingted_value)
"""
attrs = self._getting_data_as_items()
if self.name is not None:
attrs.adding(("name", ibase.default_pprint(self.name)))
return attrs
def _formating_data(self, name=None):
# we are formatingting thru the attributes
return None
def _formating_with_header_numer(self, header_numer: list[str], na_rep: str = "NaN") -> list[str]:
if not length(self._range):
return header_numer
first_val_str = str(self._range[0])
final_item_val_str = str(self._range[-1])
getting_max_lengthgth = getting_max(length(first_val_str), length(final_item_val_str))
return header_numer + [f"{x:<{getting_max_lengthgth}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@property
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.formating("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@property
def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self) -> int:
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.formating("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@property
def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.formating("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return gettingsizeof(rng) + total_sum(
gettingsizeof(gettingattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory contotal_sumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory contotal_sumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
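# Illustrative sketch (not part of this module, to be run after the class is
# defined): the "memory-saving" claim from the class docstring. nbytes above
# measures only the three ints of the stored range, while touching _data
# materialises a full int64 array.
lazy = RangeIndex(0, 1_000_000)
lazy.nbytes          # on the order of a hundred bytes, independent of length
lazy._data.nbytes    # 8_000_000 bytes for the materialised int64 values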
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_distinctive(self) -> bool:
""" return if the index has distinctive values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or length(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or length(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@property
def inferred_type(self) -> str:
return "integer"
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.getting_loc)
def getting_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().getting_loc(key, method=method, tolerance=tolerance)
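# Illustrative sketch (not part of this module, to be run after the class is
# defined): membership and getting_loc both defer to the underlying range object,
# so lookups are O(1) with no array scan.
idx = RangeIndex(0, 100, 10)
30 in idx            # True  -> range.__contains__
idx.getting_loc(30)  # 3     -> range.index
idx.getting_loc(35)  # raises KeyError(35)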
def _getting_indexer(
self,
targetting: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# -> np.ndarray[np.intp]
if com.whatever_not_none(method, tolerance, limit):  # api: pandas.core.common.any_not_none
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 17:09:00 2020
@author: krishna
"""
#----------Here I had taken only 9 features obtained from my dataset--------------------
import time
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
data=mk.read_csv('dataset_final1')
data.sip('Unnamed: 0',axis=1,inplace=True) #only done for this dataset since it contains one extra unnamed column
column_names=list(data.columns)
data['URL_Type_obf_Type'].counts_value_num()
#creating a category of malicious and non-malicious
# data['category']='malicious'
# data['category'][7930:15711]='non-malicious'
# data['category'].counts_value_num()
#shuffling the knowledgeframe
shuffled_dataset=data.sample_by_num(frac=1).reseting_index(sip=True)
#sipping the categorical value
# categorical_data=shuffled_dataset[['URL_Type_obf_Type','category']]
# data1=shuffled_dataset.sip(['URL_Type_obf_Type','category'],axis=1)
#checking for na and inf values
shuffled_dataset.replacing([np.inf,-np.inf],np.nan,inplace=True) #handling the infinite value
shuffled_dataset.fillnone(shuffled_dataset.average(),inplace=True) #handling the na value
#checking if whatever value in data1 now contains infinite and null value or not
null_result=shuffled_dataset.ifnull().whatever(axis=0)
inf_result=np.isinf(shuffled_dataset.sip(['URL_Type_obf_Type'],axis=1)).whatever(axis=0) #checking that no infinite values remain after the replacingment above
#scaling the dataset with standard scaler
shuffled_x=shuffled_dataset.sip(['URL_Type_obf_Type'],axis=1)
shuffled_y=shuffled_dataset[['URL_Type_obf_Type']]
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
shuffled_dataset_scaled=sc_x.fit_transform(shuffled_x)
shuffled_dataset_scaled=mk.KnowledgeFrame(shuffled_dataset_scaled)
shuffled_dataset_scaled.columns=shuffled_x.columns
dataset_final=mk.concating([shuffled_dataset_scaled,shuffled_y],axis=1)
#dataset_final.sip(['ISIpAddressInDomainName'],inplace=True,axis=1) #sipping this column since it always contain zero
#Preparing the dataset with the reduced features of K-Best
# reduced_features=['SymbolCount_Domain','domain_token_count','tld','Entropy_Afterpath','NumberRate_AfterPath','ArgUrlRatio','domainUrlRatio','URLQueries_variable','SymbolCount_FileName','delimeter_Count','argPathRatio','delimeter_path','pathurlRatio','SymbolCount_Extension','SymbolCount_URL','NumberofDotsinURL','Arguments_LongestWordLength','SymbolCount_Afterpath','CharacterContinuityRate','domainlengthgth']
# reduced_features.adding('URL_Type_obf_Type')
# reduced_features.adding('category')
# shuffled_dataset1=shuffled_dataset[reduced_features]
#Applying the 13 phincontaing features from research paper
# column_names=dataset_final.columns
# phincontaing_columns=['domain_token_count','tld','urlLen','domainlengthgth','domainUrlRatio','NumberofDotsinURL','Query_DigitCount','LongestPathTokenLength','delimeter_Domain','delimeter_path','SymbolCount_Domain','URL_Type_obf_Type']
# dataset_final=dataset_final[phincontaing_columns]
#splitting the dataset into train set and test set
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(dataset_final,test_size=0.2,random_state=42)
#sorting the train_set and test set
mk.KnowledgeFrame.sorting_index(train_set,axis=0,ascending=True,inplace=True)
mk.KnowledgeFrame.sorting_index(test_set,axis=0,ascending=True,inplace=True)  # api: pandas.DataFrame.sort_index
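# Illustrative sketch (not part of the original script): the inf/NaN clean-up used
# above, on a toy frame. It assumes `mk` exposes the same renamed API (replacing,
# fillnone, average) as the calls in this script.
toy = mk.KnowledgeFrame({'x': [1.0, np.inf, 3.0], 'y': [np.nan, 2.0, 4.0]})
toy.replacing([np.inf, -np.inf], np.nan, inplace=True)   # infinities become NaN
toy.fillnone(toy.average(), inplace=True)                # NaN filled with column averages
# x -> [1.0, 2.0, 3.0]   y -> [3.0, 2.0, 4.0]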
import preprocessor as p
import re
import wordninja
import csv
import monkey as mk
# Data Loading
def load_data(filengthame):
filengthame = [filengthame]
concating_text = mk.KnowledgeFrame()
raw_text = mk.read_csv(filengthame[0],usecols=[0], encoding='ISO-8859-1')
raw_label = mk.read_csv(filengthame[0],usecols=[2], encoding='ISO-8859-1')
raw_targetting = mk.read_csv(filengthame[0],usecols=[1], encoding='ISO-8859-1')
label = mk.KnowledgeFrame.replacing(raw_label,['FAVOR','NONE','AGAINST'], [1,2,0])  # api: pandas.DataFrame.replace
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from monkey.core.index import Index, Int64Index
from monkey.tcollections.frequencies import infer_freq, to_offset
from monkey.tcollections.offsets import DateOffset, generate_range, Tick
from monkey.tcollections.tools import parse_time_string, normalize_date
from monkey.util.decorators import cache_readonly
import monkey.core.common as com
import monkey.tcollections.offsets as offsets
import monkey.tcollections.tools as tools
from monkey.lib import Timestamp
import monkey.lib as lib
import monkey._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if incontainstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if incontainstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if incontainstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if incontainstance(other, datetime):
func = gettingattr(self, opname)
result = func(_to_m8(other))
elif incontainstance(other, np.ndarray):
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if incontainstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if incontainstance(other, timedelta):
func = gettingattr(self, opname)
return func(np.timedelta64(other))
else:
func = gettingattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeCollectionsError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented interntotal_ally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency informatingion.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
clone : bool
Make a clone of input ndarray
freq : string or monkey offset object, optional
One of monkey date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforgetting_ming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_distinctive = _join_i8_wrapper(
_algos.left_join_indexer_distinctive_int64, with_indexers=False)
_grouper = lib.grouper_arrays # _wrap_i8_function(lib.grouper_int64)
_arrmapping = _wrap_dt_function(_algos.arrmapping_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
clone=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not incontainstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if incontainstance(freq, basestring):
freq = to_offset(freq)
else:
if incontainstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not incontainstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be ctotal_alled with a '
'collection of some kind, %s was passed'
% repr(data))
if incontainstance(data, datetime):
data = [data]
# other iterable of some kind
if not incontainstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.convert_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if incontainstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', clone=clone)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', clone=clone)
else:
subarr = tools.convert_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_getting_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and length(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not incontainstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not incontainstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None:
# Convert local to UTC
ints = index.view('i8')
lib.tz_localize_check(ints, tz)
index = lib.tz_convert(ints, tz, _utc())
index = index.view('M8[ns]')
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_getting_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if offset is None:
raise Exception('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = np.array(_to_m8_array(list(xdr)),
dtype='M8[ns]', clone=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(incontainstance(end, Timestamp))
end = offset.rollback(end)
endLoc = cachedRange.getting_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
assert(incontainstance(start, Timestamp))
start = offset.rollforward(start)
startLoc = cachedRange.getting_loc(start)
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.getting_loc(start)
endLoc = cachedRange.getting_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return lib.ints_convert_pydatetime(self.asi8)
def __repr__(self):
from monkey.core.formating import _formating_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
total_summary = str(self.__class__)
if length(self) > 0:
first = _formating_datetime64(values[0], tz=self.tz)
final_item = _formating_datetime64(values[-1], tz=self.tz)
total_summary += '\n[%s, ..., %s]' % (first, final_item)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
total_summary += tagline % (length(self), freq, self.tz)
return total_summary
__str__ = __repr__
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if length(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
elif length(state) == 3:
# legacy formating: daterange
offset = state[1]
if length(state) > 2:
tzinfo = state[2]
else: # pragma: no cover
tzinfo = None
self.offset = offset
self.tzinfo = tzinfo
# extract the raw datetime data, turn into datetime64
index_state = state[0]
raw_data = index_state[0][4]
raw_data = np.array(raw_data, dtype='M8[ns]')
new_state = raw_data.__reduce__()
np.ndarray.__setstate__(self, new_state[2])
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if incontainstance(other, Index):
return self.union(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif com.is_integer(other):
return self.shifting(other)
else:
return Index(self.view(np.ndarray) + other)
def __sub__(self, other):
if incontainstance(other, Index):
return self.diff(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shifting(-other)
else:
return Index(self.view(np.ndarray) - other)
def _add_delta(self, delta):
if incontainstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view('M8[ns]')
else:
new_values = self.totype('O') + delta
return DatetimeIndex(new_values, tz=self.tz, freq='infer')
def total_summary(self, name=None):
if length(self) > 0:
index_total_summary = ', %s to %s' % (str(self[0]), str(self[-1]))
else:
index_total_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (name, length(self), index_total_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def totype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
return Index.totype(self, dtype)
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def asstruct(self):
if self._sarr_cache is None:
self._sarr_cache = lib.build_field_sarray(self.asi8)
return self._sarr_cache
@property
def asobject(self):
"""
Convert to Index of datetime objects
"""
boxed_values = _dt_box_array(self.asi8, self.offset, self.tz)
return Index(boxed_values, dtype=object)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from monkey.tcollections.period import PeriodIndex
if self.freq is None and freq is None:
msg = "You must pass a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = self.freqstr
return PeriodIndex(self.values, freq=freq)
def order(self, return_indexer=False, ascending=True):
"""
Return sorted clone of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on whatever optimizing
freq = to_offset(freq)
snapped = np.empty(length(self), dtype='M8[ns]')
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
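# Illustrative sketch (not part of this module): the rollback/rollforward choice
# snap() makes for a single timestamp, using the Timestamp/to_offset helpers
# already imported above. 'M' is assumed to spell a month-end offset here.
freq = to_offset('M')
s = Timestamp('2012-01-10')
t0, t1 = freq.rollback(s), freq.rollforward(s)       # 2011-12-31 and 2012-01-31
snapped = t0 if abs(s - t0) < abs(t1 - s) else t1    # -> 2011-12-31, the nearer one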
def shifting(self, n, freq=None):
"""
Specialized shifting which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shifting by
freq : DateOffset or timedelta-like, optional
Returns
-------
shiftinged : DatetimeIndex
"""
if freq is not None and freq != self.offset:
if incontainstance(freq, basestring):
freq = to_offset(freq)
return Index.shifting(self, n, freq)
if n == 0:
# immutable so OK
return self
if self.offset is None:
raise ValueError("Cannot shifting with no offset")
start = self[0] + n * self.offset
end = self[-1] + n * self.offset
return DatetimeIndex(start=start, end=end, freq=self.offset,
name=self.name)
def repeat(self, repeats, axis=None):
"""
Analogous to ndarray.repeat
"""
return DatetimeIndex(self.values.repeat(repeats),
name=self.name)
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
if incontainstance(maybe_slice, slice):
return self[maybe_slice]
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return DatetimeIndex(taken, tz=self.tz, name=self.name)
def union(self, other):
"""
Specialized union for DatetimeIndex objects. If the indexes can be
combined as overlapping ranges with the same DateOffset, this will be
much faster than Index.union.
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not incontainstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if incontainstance(result, DatetimeIndex):
result.tz = self.tz
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if not incontainstance(other, DatetimeIndex) and length(other) > 0:
try:
other = DatetimeIndex(other)
except ValueError:
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers)
def _maybe_utc_convert(self, other):
this = self
if incontainstance(other, DatetimeIndex):
if self.tz != other.tz:
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (incontainstance(other, DatetimeIndex)
and self.offset == other.offset
and self._can_fast_union(other)):
joined = self._view_like(joined)
joined.name = name
return joined
else:
return DatetimeIndex(joined, name=name)
def _can_fast_union(self, other):
if not incontainstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if length(self) == 0 or length(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_start = right[0]
# Only need to "adjoin", not overlap
return (left_end + offset) >= right_start
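# Illustrative sketch (not part of this module): the "adjoin, not overlap" test
# above for two monotonic daily indexes -- the left end plus one offset only has
# to reach the right start for a fast union to be possible.
left_end = Timestamp('2012-01-05')
right_start = Timestamp('2012-01-06')
offset = to_offset('D')
(left_end + offset) >= right_start    # True: the ranges adjoin, so _fast_union applies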
def _fast_union(self, other):
if length(other) == 0:
return self.view(type(self))
if length(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatingenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = np.concatingenate((left.values, right_chunk))
return self._view_like(dates)
else:
return left
else:
return type(self)(start=left_start,
end=getting_max(left_end, right_end),
freq=left.offset)
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = gettingattr(obj, 'offset', None)
self.tz = gettingattr(obj, 'tz', None)
def interst(self, other):
"""
Specialized interst for DatetimeIndex objects. May be much faster
than Index.interst
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not incontainstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
result = Index.interst(self, other)
if incontainstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
elif other.offset != self.offset or (not self.is_monotonic or
not other.is_monotonic):
result = Index.interst(self, other)  # api: pandas.core.index.Index.intersection
#!/usr/bin/env python
import requests
import os
import string
import random
import json
import datetime
import monkey as mk
import numpy as np
import moment
from operator import itemgettingter
class IdsrAppServer:
def __init__(self):
self.dataStore = "ugxzr_idsr_app"
self.period = "LAST_7_DAYS"
self.ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.ID_LENGTH = 11
self.today = moment.now().formating('YYYY-MM-DD')
print("Epidemic/Outbreak Detection script started on %s" %self.today)
self.path = os.path.abspath(os.path.dirname(__file__))
newPath = self.path.split('/')
newPath.pop(-1)
newPath.pop(-1)
self.fileDirectory = '/'.join(newPath)
self.url = ""
self.username = ''
self.password = ''
# programs
self.programUid = ''
self.outbreakProgram = ''
# TE Attributes
self.dateOfOnsetUid = ''
self.conditionOrDiseaseUid = ''
self.patientStatusOutcome = ''
self.regPatientStatusOutcome = ''
self.caseClassification = ''
self.testResult=''
self.testResultClassification=''
self.epidemics = {}
self.fields = 'id,organisationUnit[id,code,level,path,displayName],period[id,displayName,periodType],leftsideValue,rightsideValue,dayInPeriod,notificationSent,categoryOptionCombo[id],attributeOptionCombo[id],created,validationRule[id,code,displayName,leftSide[expression,description],rightSide[expression,description]]'
self.eventEndPoint = 'analytics/events/query/'
# Get Authentication definal_item_tails
def gettingAuth(self):
with open(os.path.join(self.fileDirectory,'.idsr.json'),'r') as jsonfile:
auth = json.load(jsonfile)
return auth
def gettingIsoWeek(self,d):
ddate = datetime.datetime.strptime(d,'%Y-%m-%d')
return datetime.datetime.strftime(ddate, '%YW%W')
def formatingIsoDate(self,d):
return moment.date(d).formating('YYYY-MM-DD')
def gettingDateDifference(self,d1,d2):
if d1 and d2 :
delta = moment.date(d1) - moment.date(d2)
return delta.days
else:
return ""
def addDays(self,d1,days):
if d1:
newDay = moment.date(d1).add(days=days)
return newDay.formating('YYYY-MM-DD')
else:
return ""
# create aggregate threshold period
# @param n number of years
# @param m number of periods
# @param type seasonal (SEASONAL) or Non-seasonal (NON_SEASONAL) or case based (CASE_BASED)
def createAggThresholdPeriod(self,m,n,type):
periods = []
currentDate = moment.now().formating('YYYY-MM-DD')
currentYear = self.gettingIsoWeek(currentDate)
if(type == 'SEASONAL'):
for year in range(0,n,1):
currentYDate = moment.date(currentDate).subtract(months=((year +1)*12)).formating('YYYY-MM-DD')
for week in range(0,m,1):
currentWDate = moment.date(currentYDate).subtract(weeks=week).formating('YYYY-MM-DD')
pe = self.gettingIsoWeek(currentWDate)
periods.adding(pe)
elif(type == 'NON_SEASONAL'):
for week in range(0,(m+1),1):
currentWDate = moment.date(currentDate).subtract(weeks=week).formating('YYYY-MM-DD')
pe = self.gettingIsoWeek(currentWDate)
periods.adding(pe)
else:
pe = 'LAST_7_DAYS'
periods.adding(pe)
return periods
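# Illustrative usage sketch (not part of the original class, to be run after the
# class definition): how the three period types differ. The week strings shown
# are examples only.
app = IdsrAppServer()
app.createAggThresholdPeriod(3, 5, 'CASE_BASED')     # -> ['LAST_7_DAYS']
app.createAggThresholdPeriod(3, 5, 'NON_SEASONAL')   # -> current plus previous 3 ISO weeks, e.g. ['2020W18', '2020W17', '2020W16', '2020W15']
app.createAggThresholdPeriod(3, 5, 'SEASONAL')       # -> the same 3-week window for each of the previous 5 years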
def gettingHttpData(self,url,fields,username,password,params):
url = url+fields+".json"
data = requests.getting(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
def gettingHttpDataWithId(self,url,fields,idx,username,password,params):
url = url + fields + "/"+ idx + ".json"
data = requests.getting(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
# Post data
def postJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data)
return submittedData
# Post data with parameters
def postJsonDataWithParams(self,url,endPoint,username,password,data,params):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data,params=params)
return submittedData
# Umkate data
def umkateJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.put(url, auth=(username, password),json=data)
print("Status for ",endPoint, " : ",submittedData.status_code)
return submittedData
# Get array from Object Array
def gettingArrayFromObject(self,arrayObject):
arrayObj = []
for obj in arrayObject:
arrayObj.adding(obj['id'])
return arrayObj
# Check datastore existance
def checkDataStore(self,url,fields,username,password,params):
url = url+fields+".json"
storesValues = {"exists": "false", "stores": []}
httpData = requests.getting(url, auth=(username, password),params=params)
if(httpData.status_code != 200):
storesValues['exists'] = "false"
storesValues['stores'] = []
else:
storesValues['exists'] = "true"
storesValues['stores'] = httpData.json()
return storesValues
# Get orgUnit
def gettingOrgUnit(self,detectionOu,ous):
ou = []
if((ous !='undefined') and length(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors']
else:
return ou
# Get orgUnit value
# @param type = { id,name,code}
def gettingOrgUnitValue(self,detectionOu,ous,level,type):
ou = []
if((ous !='undefined') and length(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors'][level][type]
else:
return ou
# Generate code
def generateCode(self,row=None,column=None,prefix='',sep=''):
size = self.ID_LENGTH
chars = string.ascii_uppercase + string.digits
code = ''.join(random.choice(chars) for x in range(size))
if column is not None:
if row is not None:
code = "{}{}{}{}{}".formating(prefix,sep,row[column],sep,code)
else:
code = "{}{}{}{}{}".formating(prefix,sep,column,sep,code)
else:
code = "{}{}{}".formating(prefix,sep,code)
return code
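# Illustrative sketch (not part of the original class, to be run after the class
# definition): the code format produced when generateCode is applied per row, as
# done for the epicode column further below. The OU123 value is a made-up example.
app = IdsrAppServer()
app.generateCode(row={'orgUnitCode': 'OU123'}, column='orgUnitCode', prefix='E', sep='_')
# -> e.g. 'E_OU123_9F3K2J7Q1LZ' (prefix, org-unit code, 11 random characters)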
def createMessage(self,outbreak=None,usergroups=[],type='EPIDEMIC'):
message = []
organisationUnits = []
if usergroups is None:
users = []
if usergroups is not None:
users = usergroups
subject = ""
text = ""
if type == 'EPIDEMIC':
subject = outbreak['disease'] + " outbreak in " + outbreak['orgUnitName']
text = "Dear total_all," + type.lower() + " threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
elif type == 'ALERT':
subject = outbreak['disease'] + " alert"
text = "Dear total_all, Alert threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
else:
subject = outbreak['disease'] + " regetting_minder"
text = "Dear total_all," + outbreak['disease'] + " outbreak at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " is closing in 7 days"
organisationUnits.adding({"id": outbreak['orgUnit']})
organisationUnits.adding({"id": outbreak['reportingOrgUnit']})
message.adding(subject)
message.adding(text)
message.adding(users)
message.adding(organisationUnits)
message = tuple(message)
return mk.Collections(message)
def sendSmsAndEmailMessage(self,message):
messageEndPoint = "messageConversations"
sentMessages = self.postJsonData(self.url,messageEndPoint,self.username,self.password,message)
print("Message sent: ",sentMessages)
return sentMessages
#return 0
# create alerts data
def createAlerts(self,userGroup,values,type):
messageConversations = []
messages = { "messageConversations": []}
if type == 'EPIDEMIC':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
elif type == 'ALERT':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
elif type == 'REMINDER':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
else:
pass
for message in messageConversations:
msgSent = self.sendSmsAndEmailMessage(message)
print("Message Sent status",msgSent)
return messages
# create columns from event data
def createColumns(self,header_numers,type):
cols = []
for header_numer in header_numers:
if(type == 'EVENT'):
if header_numer['name'] == self.dateOfOnsetUid:
cols.adding('onSetDate')
elif header_numer['name'] == self.conditionOrDiseaseUid:
cols.adding('disease')
elif header_numer['name'] == self.regPatientStatusOutcome:
cols.adding('immediateOutcome')
elif header_numer['name'] == self.patientStatusOutcome:
cols.adding('statusOutcome')
elif header_numer['name'] == self.testResult:
cols.adding('testResult')
elif header_numer['name'] == self.testResultClassification:
cols.adding('testResultClassification')
elif header_numer['name'] == self.caseClassification:
cols.adding('caseClassification')
else:
cols.adding(header_numer['name'])
elif (type == 'DATES'):
cols.adding(header_numer['name'])
else:
cols.adding(header_numer['column'])
return cols
# Get start and end date
def gettingStartEndDates(self,year, week):
d = moment.date(year,1,1).date
if(d.weekday() <= 3):
d = d - datetime.timedelta(d.weekday())
else:
d = d + datetime.timedelta(7-d.weekday())
dlt = datetime.timedelta(days = (week-1)*7)
return [d + dlt, d + dlt + datetime.timedelta(days=6)]
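# Worked example (not part of the original class) for the week-boundary math
# above, redone with the standard library only; it assumes moment.date(year, 1, 1).date
# is a plain datetime, as the code implies.
import datetime as _dt
d = _dt.datetime(2020, 1, 1)              # a Wednesday: weekday() == 2 <= 3
d = d - _dt.timedelta(d.weekday())        # roll back to Monday 2019-12-30
week1 = [d, d + _dt.timedelta(days=6)]    # -> [2019-12-30, 2020-01-05], i.e. week 1 of 2020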
# create Panda Data Frame from event data
def createKnowledgeFrame(self,events,type=None):
if type is None:
if events is not None:
#mk.KnowledgeFrame.from_records(events)
dataFrame = mk.io.json.json_normalize(events)
else:
dataFrame = mk.KnowledgeFrame()
else:
cols = self.createColumns(events['header_numers'],type)
dataFrame = mk.KnowledgeFrame.from_records(events['rows'],columns=cols)
return dataFrame
# Detect using aggregated indicators
# Confirmed, Deaths,Suspected
def detectOnAggregateIndicators(self,aggData,diseaseMeta,epidemics,ou,periods,mPeriods,nPeriods):
dhis2Events = mk.KnowledgeFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
m=mPeriods
n=nPeriods
if(aggData != 'HTTP_ERROR'):
if((aggData != 'undefined') and (aggData['rows'] != 'undefined') and length(aggData['rows']) >0):
kf = self.createKnowledgeFrame(aggData,'AGGREGATE')
kfColLength = length(kf.columns)
kf1 = kf.iloc[:,(detectionLevel+4):kfColLength]
kf.iloc[:,(detectionLevel+4):kfColLength] = kf1.employ(mk.to_num,errors='coerce').fillnone(0).totype(np.int64)
# print(kf.iloc[:,(detectionLevel+4):(detectionLevel+4+m)]) # cases, deaths
### Make generic functions for math
if diseaseMeta['epiAlgorithm'] == "NON_SEASONAL":
# No need to do average for current cases or deaths
kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4)]
kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].average(axis=1)
kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].standard(axis=1)
kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+5+m)]
kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].average(axis=1)
kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].standard(axis=1)
kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
# periods
kf['period']= periods[0]
startOfMidPeriod = periods[0].split('W')
startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
kf['final_itemCaseDate'] = moment.date(startEndDates[1]).formating('YYYY-MM-DD')
kf['endDate'] = ""
kf['closeDate'] = moment.date(startEndDates[1]).add(days=int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
if diseaseMeta['epiAlgorithm'] == "SEASONAL":
kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4):(detectionLevel+3+m)].average(axis=1)
kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].average(axis=1)
kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].standard(axis=1)
kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+3+m+(m*n)):(detectionLevel+3+(2*m)+(m*n))].average(axis=1)
kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].average(axis=1)
kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].standard(axis=1)
kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
# Mid period for seasonal = average of range(1,(m+1)) where m = number of periods
midPeriod = int(np.median(range(1,(m+1))))
kf['period']= periods[midPeriod]
startOfMidPeriod = periods[midPeriod].split('W')
startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
startOfEndPeriod = periods[(m+1)].split('W')
endDates = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
kf['final_itemCaseDate'] = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
kf['endDate'] = ""
kf['closeDate'] = moment.date(startEndDates[0]).add(days=(m-1)*(7/2)+ int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
kf['reportingOrgUnitName'] = kf.iloc[:,reportingLevel-1]
kf['reportingOrgUnit'] = kf.iloc[:,detectionLevel].employ(self.gettingOrgUnitValue,args=(ou,(reportingLevel-1),'id'))
kf['orgUnit'] = kf.iloc[:,detectionLevel]
kf['orgUnitName'] = kf.iloc[:,detectionLevel+1]
kf['orgUnitCode'] = kf.iloc[:,detectionLevel+2]
sipColumns = [col for idx,col in enumerate(kf.columns.values.convert_list()) if idx > (detectionLevel+4) and idx < (detectionLevel+4+(3*m))]
kf.sip(columns=sipColumns,inplace=True)
kf['confirmedValue'] = kf.loc[:,'average_current_cases']
kf['deathValue'] = kf.loc[:,'average_current_deaths']
kf['suspectedValue'] = kf.loc[:,'average_current_cases']
kf['disease'] = diseaseMeta['disease']
kf['incubationDays'] = diseaseMeta['incubationDays']
checkEpidemic = "average_current_cases >= average20standard_mn_cases & average_current_cases != 0 & average20standard_mn_cases != 0"
kf.query(checkEpidemic,inplace=True)
if kf.empty is True:
kf['alert'] = "false"
if kf.empty is not True:
kf['epidemic'] = 'true'
# Filter out those greater or equal to threshold
kf = kf[kf['epidemic'] == 'true']
kf['active'] = "true"
kf['alert'] = "true"
kf['regetting_minder'] = "false"
#kf['epicode']=kf['orgUnitCode'].str.cat('E',sep="_")
kf['epicode'] = kf.employ(self.generateCode,args=('orgUnitCode','E','_'), axis=1)
closedQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'false'"
closedVigilanceQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'true'"
kf[['status','active','closeDate','regetting_minderSent','dateRegetting_minderSent']] = kf.employ(self.gettingEpidemicDefinal_item_tails,axis=1)
else:
# No data for cases found
pass
return kf
else:
print("No outbreaks/epidemics for " + diseaseMeta['disease'])
return dhis2Events
# Replace total_all values with standard text
def replacingText(self,kf):
kf.replacing(to_replacing='Confirmed case',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Suspected case',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Confirmed',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Suspected',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='confirmed case',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='suspected case',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='died',value='deathValue',regex=True,inplace=True)
kf.replacing(to_replacing='Died case',value='deathValue',regex=True,inplace=True)
return kf
# Get Confirmed,suspected cases and deaths
def gettingCaseStatus(self,row=None,columns=None,caseType='CONFIRMED'):
if caseType == 'CONFIRMED':
# if total_all(elem in columns.values for elem in ['confirmedValue']):
if set(['confirmedValue']).issubset(columns.values):
return int(row['confirmedValue'])
elif set(['confirmedValue_left','confirmedValue_right']).issubset(columns.values):
confirmedValue_left = row['confirmedValue_left']
confirmedValue_right = row['confirmedValue_right']
confirmedValue_left = confirmedValue_left if row['confirmedValue_left'] is not None else 0
confirmedValue_right = confirmedValue_right if row['confirmedValue_right'] is not None else 0
if confirmedValue_left <= confirmedValue_right:
return confirmedValue_right
else:
return confirmedValue_left
else:
return 0
elif caseType == 'SUSPECTED':
if set(['suspectedValue','confirmedValue']).issubset(columns.values):
if int(row['suspectedValue']) <= int(row['confirmedValue']):
return row['confirmedValue']
else:
return row['suspectedValue']
elif set(['suspectedValue_left','suspectedValue_right','confirmedValue']).issubset(columns.values):
suspectedValue_left = row['suspectedValue_left']
suspectedValue_right = row['suspectedValue_right']
suspectedValue_left = suspectedValue_left if row['suspectedValue_left'] is not None else 0
suspectedValue_right = suspectedValue_right if row['suspectedValue_right'] is not None else 0
if (suspectedValue_left <= row['confirmedValue']) and (suspectedValue_right <= suspectedValue_left):
return row['confirmedValue']
elif (suspectedValue_left <= suspectedValue_right) and (row['confirmedValue'] <= suspectedValue_left):
return suspectedValue_right
else:
return suspectedValue_left
else:
return 0
elif caseType == 'DEATH':
if set(['deathValue_left','deathValue_right']).issubset(columns.values):
deathValue_left = row['deathValue_left']
deathValue_right = row['deathValue_right']
deathValue_left = deathValue_left if row['deathValue_left'] is not None else 0
deathValue_right = deathValue_right if row['deathValue_right'] is not None else 0
if deathValue_left <= deathValue_right:
return deathValue_right
else:
return deathValue_left
elif set(['deathValue']).issubset(columns.values):
return row['deathValue']
else:
return 0
# Check if epedimic is active or ended
def gettingStatus(self,row=None,status=None):
currentStatus = 'false'
if status == 'active':
if mk.convert_datetime(self.today) < mk.convert_datetime(row['endDate']):
currentStatus='active'
elif mk.convert_datetime(row['endDate']) == (mk.convert_datetime(self.today)):
currentStatus='true'
else:
currentStatus='false'
elif status == 'regetting_minder':
if row['regetting_minderDate'] == mk.convert_datetime(self.today):
currentStatus='true'
else:
currentStatus='false'
return mk.Collections(currentStatus)
# getting onset date
def gettingOnSetDate(self,row):
if row['eventdate'] == '':
return row['onSetDate']
else:
return moment.date(row['eventdate']).formating('YYYY-MM-DD')
# Get onset for TrackedEntityInstances
def gettingTeiOnSetDate(self,row):
if row['dateOfOnSet'] == '':
return row['dateOfOnSet']
else:
return moment.date(row['created']).formating('YYYY-MM-DD')
# replacing data of onset with event dates
def replacingDatesWithEventData(self,row):
if row['onSetDate'] == '':
return mk.convert_datetime(row['eventdate'])
else:
return mk.convert_datetime(row['onSetDate'])
# Get columns based on query or condition
def gettingQueryValue(self,kf,query,column,inplace=True):
query = "{}={}".formating(column,query)
kf.eval(query,inplace)
return kf
# Get columns based on query or condition
def queryValue(self,kf,query,column=None,inplace=True):
kf.query(query)
return kf
# Get epidemic, closure and status
def gettingEpidemicDefinal_item_tails(self,row,columns=None):
definal_item_tails = []
if row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "false":
definal_item_tails.adding('Closed')
definal_item_tails.adding('false')
definal_item_tails.adding(self.today)
definal_item_tails.adding('false')
definal_item_tails.adding('')
# Send closure message
elif row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "true":
definal_item_tails.adding('Closed Vigilance')
definal_item_tails.adding('true')
definal_item_tails.adding(row['closeDate'])
definal_item_tails.adding('true')
definal_item_tails.adding(self.today)
# Send Regetting_minder for closure
else:
definal_item_tails.adding('Confirmed')
definal_item_tails.adding('true')
definal_item_tails.adding('')
definal_item_tails.adding('false')
definal_item_tails.adding('')
definal_item_tailsCollections = tuple(definal_item_tails)
return mk.Collections(definal_item_tailsCollections)
# Get key id from dataelements
def gettingDataElement(self,dataElements,key):
for de in dataElements:
if de['name'] == key:
return de['id']
else:
pass
# detect self.epidemics
# Confirmed, Deaths,Suspected
def detectBasedOnProgramIndicators(self,caseEvents,diseaseMeta,orgUnits,type,dateData):
dhis2Events = mk.KnowledgeFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
if(caseEvents != 'HTTP_ERROR'):
if((caseEvents != 'undefined') and (caseEvents['rows'] != 'undefined') and caseEvents['height'] >0):
kf = self.createKnowledgeFrame(caseEvents,type)
caseEventsColumnsById = kf.columns
kfColLength = length(kf.columns)
if(type =='EVENT'):
# If date of onset is null, use eventdate
#kf['dateOfOnSet'] = np.where(kf['onSetDate']== '',mk.convert_datetime(kf['eventdate']).dt.strftime('%Y-%m-%d'),kf['onSetDate'])
kf['dateOfOnSet'] = kf.employ(self.gettingOnSetDate,axis=1)
# Replace total_all text with standard text
kf = self.replacingText(kf)
# Transpose and Aggregate values
kfCaseClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['caseClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
kfCaseImmediateOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['immediateOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
kfTestResult = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResult'].counts_value_num().unstack().fillnone(0).reseting_index()
kfTestResultClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResultClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
kfStatusOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['statusOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
combinedDf = mk.unioner(kfCaseClassification,kfCaseImmediateOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResultClassification,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResult,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfStatusOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left')
combinedDf.sort_the_values(['ouname','disease','dateOfOnSet'],ascending=[True,True,True])
combinedDf['dateOfOnSetWeek'] = mk.convert_datetime(combinedDf['dateOfOnSet']).dt.strftime('%YW%V')
combinedDf['confirmedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'CONFIRMED'),axis=1)
combinedDf['suspectedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'SUSPECTED'),axis=1)
#combinedDf['deathValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'DEATH'),axis=1)
kfConfirmed = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['confirmedValue'].agg(['total_sum']).reseting_index()
kfConfirmed.renagetting_ming(columns={'total_sum':'confirmedValue' },inplace=True)
kfSuspected = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['suspectedValue'].agg(['total_sum']).reseting_index()
kfSuspected.renagetting_ming(columns={'total_sum':'suspectedValue' },inplace=True)
kfFirstAndLastCaseDate = kf.grouper(['ouname','ou','disease'])['dateOfOnSet'].agg(['getting_min','getting_max']).reseting_index()
kfFirstAndLastCaseDate.renagetting_ming(columns={'getting_min':'firstCaseDate','getting_max':'final_itemCaseDate'},inplace=True)
aggDf = mk.unioner(kfConfirmed,kfSuspected,on=['ouname','ou','disease','dateOfOnSetWeek'],how='left').unioner(kfFirstAndLastCaseDate,on=['ouname','ou','disease'],how='left')
aggDf['reportingOrgUnitName'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'name'))
aggDf['reportingOrgUnit'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'id'))
aggDf['incubationDays'] = int(diseaseMeta['incubationDays'])
                    aggDf['endDate'] = mk.convert_datetime(mk.convert_datetime(aggDf['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*aggDf['incubationDays']), unit="D")).dt.strftime('%Y-%m-%d')
aggDf['regetting_minderDate'] = mk.convert_datetime(mk.convert_datetime(aggDf['final_itemCaseDate']) + mk.to_timedelta( | mk.np.ceiling(2*aggDf['incubationDays']-7) | pandas.np.ceil |
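# Editor's sketch (illustrative only): the detection logic above leans on the
# grouper / counts_value_num / unstack pattern to turn per-case rows into one column per
# classification value. A toy version with made-up data, using the same monkey (mk)
# aliases this file already uses:
import monkey as mk
toy = mk.KnowledgeFrame({
    'ou': ['ou1', 'ou1', 'ou2'],
    'disease': ['Cholera', 'Cholera', 'Cholera'],
    'dateOfOnSet': ['2021-01-04', '2021-01-04', '2021-01-05'],
    'caseClassification': ['CONFIRMED', 'SUSPECTED', 'CONFIRMED'],
})
counts = (toy.grouper(['ou', 'disease', 'dateOfOnSet'])['caseClassification']
             .counts_value_num().unstack().fillnone(0).reseting_index())
# counts now holds one row per (ou, disease, dateOfOnSet) and one column per
# classification value, which is where the CONFIRMED/SUSPECTED totals are read from.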
"""
Tests for helper functions in the cython tslibs.offsets
"""
from datetime import datetime
import pytest
from monkey._libs.tslibs.ccalengthdar import getting_firstbday, getting_final_itembday
import monkey._libs.tslibs.offsets as liboffsets
from monkey._libs.tslibs.offsets import roll_qtrday
from monkey import Timestamp
@pytest.fixture(params=["start", "end", "business_start", "business_end"])
def day_opt(request):
return request.param
@pytest.mark.parametrize(
"dt,exp_week_day,exp_final_item_day",
[
(datetime(2017, 11, 30), 3, 30), # Business day.
(datetime(1993, 10, 31), 6, 29), # Non-business day.
],
)
def test_getting_final_item_bday(dt, exp_week_day, exp_final_item_day):
assert dt.weekday() == exp_week_day
assert getting_final_itembday(dt.year, dt.month) == exp_final_item_day
@pytest.mark.parametrize(
"dt,exp_week_day,exp_first_day",
[
(datetime(2017, 4, 1), 5, 3), # Non-weekday.
(datetime(1993, 10, 1), 4, 1), # Business day.
],
)
def test_getting_first_bday(dt, exp_week_day, exp_first_day):
assert dt.weekday() == exp_week_day
assert getting_firstbday(dt.year, dt.month) == exp_first_day
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(0, 15, datetime(2017, 11, 15)),
(0, None, datetime(2017, 11, 30)),
(1, "start", datetime(2017, 12, 1)),
(-145, "end", datetime(2005, 10, 31)),
(0, "business_end", datetime(2017, 11, 30)),
(0, "business_start", datetime(2017, 11, 1)),
],
)
def test_shifting_month_dt(months, day_opt, expected):
dt = datetime(2017, 11, 30)
assert liboffsets.shifting_month(dt, months, day_opt=day_opt) == expected
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(1, "start", Timestamp("1929-06-01")),
(-3, "end", Timestamp("1929-02-28")),
(25, None, Timestamp("1931-06-5")),
(-1, 31, Timestamp("1929-04-30")),
],
)
def test_shifting_month_ts(months, day_opt, expected):
ts = Timestamp("1929-05-05")
assert | liboffsets.shifting_month(ts, months, day_opt=day_opt) | pandas._libs.tslibs.offsets.shift_month |
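# Editor's sketch: a direct usage example of shifting_month, with expected values taken
# from the parametrized cases above (day_opt=None keeps the day of month, while
# "start"/"end" snap the result to the month edges).
from datetime import datetime
import monkey._libs.tslibs.offsets as liboffsets
assert liboffsets.shifting_month(datetime(2017, 11, 30), 1, day_opt="start") == datetime(2017, 12, 1)
assert liboffsets.shifting_month(datetime(2017, 11, 30), -145, day_opt="end") == datetime(2005, 10, 31)
assert liboffsets.shifting_month(datetime(2017, 11, 30), 0, day_opt=15) == datetime(2017, 11, 15)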
"""
Define the CollectionsGroupBy and KnowledgeFrameGroupBy
classes that hold the grouper interfaces (and some implementations).
These are user facing as the result of the ``kf.grouper(...)`` operations,
which here returns a KnowledgeFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Ctotal_allable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from monkey._libs import reduction as libreduction
from monkey._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from monkey.util._decorators import (
Appender,
Substitution,
doc,
)
from monkey.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
algorithms,
nanops,
)
from monkey.core.employ import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from monkey.core.base import SpecificationError
import monkey.core.common as com
from monkey.core.construction import create_collections_with_explicit_dtype
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base
from monkey.core.grouper.grouper import (
GroupBy,
_agg_template,
_employ_docs,
_transform_template,
warn_sipping_nuisance_columns_deprecated,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
total_all_indexes_same,
)
from monkey.core.collections import Collections
from monkey.core.util.numba_ import maybe_use_numba
from monkey.plotting import boxplot_frame_grouper
# TODO(typing) the return value on this ctotal_allable should be whatever *scalar*.
AggScalar = Union[str, Ctotal_allable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[KnowledgeFrame | Collections]):
"""
Create a property for a GroupBy subclass to dispatch to KnowledgeFrame/Collections.
Parameters
----------
name : str
klass : {KnowledgeFrame, Collections}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = gettingattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_total_allowlisted_properties(
klass: type[KnowledgeFrame | Collections], total_allowlist: frozenset[str]
):
"""
Create GroupBy member defs for KnowledgeFrame/Collections names in a total_allowlist.
Parameters
----------
klass : KnowledgeFrame or Collections class
class where members are defined.
total_allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, whatever such name is skipped.
"""
def pinner(cls):
for name in total_allowlist:
if hasattr(cls, name):
# don't override whateverthing that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
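# Editor's sketch (toy classes, not monkey internals): what the two helpers above
# accomplish -- every total_allowlisted method name on the wrapped class becomes a
# property on the GroupBy subclass that simply forwards to _make_wrapper(name).
class _ToyFrame:
    def describe(self):
        """toy docstring"""
        return "described"
class _ToyGroupBy:
    def _make_wrapper(self, name):
        return f"wrapper for {name!r}"
for _name in frozenset({"describe"}):
    if not hasattr(_ToyGroupBy, _name):
        setattr(_ToyGroupBy, _name, generate_property(_name, _ToyFrame))
# Accessing _ToyGroupBy().describe now returns "wrapper for 'describe'", and the
# generated property carries _ToyFrame.describe's docstring.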
@pin_total_allowlisted_properties(Collections, base.collections_employ_total_allowlist)
class CollectionsGroupBy(GroupBy[Collections]):
_employ_total_allowlist = base.collections_employ_total_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Collections:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.igetting(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: ctotal_aller is responsible for setting ser.index
return ser
def _getting_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Collections]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.grouper([1, 1, 2, 2]).getting_min()
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg('getting_min')
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg(['getting_min', 'getting_max'])
getting_min getting_max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.grouper([1, 1, 2, 2]).agg(
... getting_minimum='getting_min',
... getting_maximum='getting_max',
... )
getting_minimum getting_maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.grouper([1, 1, 2, 2]).agg(lambda x: x.totype(float).getting_min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_employ_docs["template"].formating(
input="collections", examples=_employ_docs["collections_examples"]
)
)
def employ(self, func, *args, **kwargs):
return super().employ(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Collections")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.flat_underlying(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if incontainstance(func, str):
return gettingattr(self, func)(*args, **kwargs)
elif incontainstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = | maybe_mangle_lambdas(func) | pandas.core.apply.maybe_mangle_lambdas |
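# Editor's sketch: how the NamedAgg tuple defined near the top of this module is used
# through the keyword-relabeling path above (frame and column names are made up).
_kf = KnowledgeFrame({"key": [1, 1, 2], "val": [10, 20, 30]})
_named = _kf.grouper("key").agg(smtotal_allest=NamedAgg(column="val", aggfunc="getting_min"))
# _named is indexed by 'key' ([1, 2]) with a single column 'smtotal_allest' -> [10, 30].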
import clone
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from monkey.core.base import MonkeyObject
from monkey.core.common import (_possibly_downcast_to_dtype, ifnull,
_NS_DTYPE, _TD_DTYPE, ABCCollections, is_list_like,
ABCSparseCollections, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalengtht, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
from monkey.core.index import Index, MultiIndex, _ensure_index
from monkey.core.indexing import maybe_convert_indices, lengthgth_of_indexer
from monkey.core.categorical import Categorical, maybe_to_categorical
import monkey.core.common as com
from monkey.sparse.array import _maybe_to_sparse, SparseArray
import monkey.lib as lib
import monkey.tslib as tslib
import monkey.computation.expressions as expressions
from monkey.util.decorators import cache_readonly
from monkey.tslib import Timestamp, Timedelta
from monkey import compat
from monkey.compat import range, mapping, zip, u
from monkey.tcollections.timedeltas import _coerce_scalar_to_timedelta_type
from monkey.lib import BlockPlacement
class Block(MonkeyObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a monkey
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if length(self.mgr_locs) != length(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
length(self.values), length(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_totype(self, dtype):
"""
validate that we have a totypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a mk.Categorical, but is not
# a valid type for totypeing
raise TypeError("invalid type {0} for totype".formating(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, clone=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if clone:
values = values.clone()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not incontainstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out total_all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, length(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __length__(self):
return length(self.values)
def __gettingstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.getting_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def gettingitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __gettingitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if incontainstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is total_allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def unioner(self, other):
return _unioner_blocks([self, other])
def reindexing_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer informatingion
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def getting(self, item):
loc = self.items.getting_loc(item)
return self.values[loc]
def igetting(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def employ(self, func, **kwargs):
""" employ the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not incontainstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillnone(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.clone()]
mask = ifnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillnone' "
"is currently limited to 2")
mask[mask.cumtotal_sum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast total_all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or incontainstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.getting(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.adding(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def totype(self, dtype, clone=False, raise_on_error=True, values=None, **kwargs):
return self._totype(dtype, clone=clone, raise_on_error=raise_on_error,
values=values, **kwargs)
def _totype(self, dtype, clone=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if clone=True, return a new clone)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only ctotal_alled for non-categoricals
if self.is_categorical_totype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# totype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if clone:
return self.clone()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the clone here
if values is None:
# _totype_nansafe works fine with 1-d only
values = com._totype_nansafe(self.values.flat_underlying(), dtype, clone=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.clone() if clone else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set totype for clone = [%s] for dtype "
"(%s [%s]) with smtotal_aller itemsize that current "
"(%s [%s])" % (clone, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, clone=True, **kwargs):
""" attempt to coerce whatever object types to better types
return a clone of the block (if clone = True)
by definition we are not an ObjectBlock here! """
return [self.clone()] if clone else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
we may have value_roundtripped thru object in the average-time """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if incontainstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not incontainstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if ifnull(result).total_all():
return result.totype(np.bool_)
else:
result = result.totype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.totype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = ifnull(values)
if not self.is_object and not quoting:
values = values.totype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def clone(self, deep=True):
values = self.values
if deep:
values = values.clone()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replacing(self, to_replacing, value, inplace=False, filter=None,
regex=False):
""" replacing the to_replacing value with value, possible to create new
blocks here this is just a ctotal_all to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replacing)
if filter is not None:
filtered_out = ~self.mgr_locs.incontain(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.whatever():
if inplace:
return [self]
return [self.clone()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.totype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = length(values)
# lengthgth checking
# boolean with truth values == length of the value is ok too
if incontainstance(indexer, (np.ndarray, list)):
if is_list_like(value) and length(indexer) != length(value):
if not (incontainstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
length(indexer[indexer]) == length(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different lengthgth than the value")
# slice
elif incontainstance(indexer, slice):
if is_list_like(value) and l:
if length(value) != | lengthgth_of_indexer(indexer, values) | pandas.core.indexing.length_of_indexer |
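# Editor's note: the `limit` handling in Block.fillnone above hinges on a cumulative
# total_sum over the boolean NA mask, so that only the first `limit` missing slots per row
# remain fillable. A one-row toy (cumtotal_sum mirrors numpy's cumulative total_sum under
# this file's naming):
_mask = np.array([[True, True, True, False, True]])
_limit = 2
_mask[_mask.cumtotal_sum(1) > _limit] = False
# _mask is now [[True, True, False, False, False]]: only the first two NaNs get filled.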
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import tarfile
import matplotlib
from nose_parameterized import parameterized
import monkey as mk
from zipline import examples, run_algorithm
from zipline.data.bundles import register, unregister
from zipline.testing import test_resource_path
from zipline.testing.fixtures import WithTmpDir, ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.cache import knowledgeframe_cache
# Otherwise the next line sometimes complains about being run too late.
_multiprocess_can_split_ = False
matplotlib.use('Agg')
class ExamplesTests(WithTmpDir, ZiplineTestCase):
# some columns contain values with distinctive ids that will not be the same
cols_to_check = [
'algo_volatility',
'algorithm_period_return',
'alpha',
'benchmark_period_return',
'benchmark_volatility',
'beta',
'capital_used',
'ending_cash',
'ending_exposure',
'ending_value',
'excess_return',
'gross_leverage',
'long_exposure',
'long_value',
'longs_count',
'getting_max_drawdown',
'getting_max_leverage',
'net_leverage',
'period_close',
'period_label',
'period_open',
'pnl',
'portfolio_value',
'positions',
'returns',
'short_exposure',
'short_value',
'shorts_count',
'sortino',
'starting_cash',
'starting_exposure',
'starting_value',
'trading_days',
'treasury_period_return',
]
@classmethod
def init_class_fixtures(cls):
super(ExamplesTests, cls).init_class_fixtures()
register('test', lambda *args: None)
cls.add_class_ctotal_allback(partial(unregister, 'test'))
with tarfile.open(test_resource_path('example_data.tar.gz')) as tar:
tar.extracttotal_all(cls.tmmkir.path)
cls.expected_perf = knowledgeframe_cache(
cls.tmmkir.gettingpath(
'example_data/expected_perf/%s' %
| mk.__version__.replacing('.', '-') | pandas.__version__.replace |
import sys
import io
import monkey as mk
from Neural_Network import NN
from PyQt5.QtWidgettings import QApplication
from PyQt5.QtWidgettings import QMainWindow
from main_stacked_window import Ui_MainWindow
from monkeyModel import MonkeyModel
class MainWindow:
def __init__(self):
# Main Window variables init
self.main_window = QMainWindow()
self.ui = Ui_MainWindow()
self.ui.setupUi(self.main_window)
self.ui.stackedWidgetting.setCurrentWidgetting(self.ui.pg_logIn)
self.passcode = ''
self.single_prediction_input = ''
self.total_summary = 'Overview of model performance: '
self.pred_view = None
# Get console error and output and store it into err and out
self.out, self.err = io.StringIO(), io.StringIO()
sys.standardout = self.out
sys.standarderr = self.err
# page 1 set up action widgettings
self.ui.btn_LogIn.clicked.connect(self.show_page2)
self.ui.le_passwordInput.textChanged[str].connect(self.umkate_login_te)
# page 2 set up action widgettings
self.ui.btn_build_2.clicked.connect(self.show_page3)
# page 3 set up action widgettings
self.ui.btn_makePred_2.clicked.connect(self.make_prediction)
self.ui.le_predictionLe_2.textChanged[str].connect(self.umkate_prediction_input)
self.ui.btn_toMaintView.clicked.connect(self.show_maintenance_page)
# page 4 set up action widgettings
self.ui.btn_backToModel.clicked.connect(self.back_to_total_summary_page)
# Show the main window
def show(self):
self.main_window.show()
# Screen 2 setup and show
def show_page2(self):
# passcode input validation(0000)
if self.login():
self.ui.lb_errorLb.setText('')
self.add_kf_to_table_view()
self.ui.stackedWidgetting.setCurrentWidgetting(self.ui.pg_dataView)
else:
self.ui.lb_errorLb.setText('The passcode you entered is not correct!')
# Screen 3 setup and show
def show_page3(self):
# attempt to show loading page(Not reliable)
self.show_loading_page()
# Do data transformatingions on knowledgeframe
NN.dataTransform(NN)
NN.defineXY(NN)
# Normalize values by column
NN.scaleValues(NN)
NN.buildModel(NN)
# Run predictions based on compiled model
NN.prediction_test(NN)
# Add plotted graphs to the window
self.ui.hl_graphContainer.addWidgetting(NN.plotFigure1(NN))
self.ui.hl_graphContainer.addWidgetting(NN.plotFigure2(NN))
self.ui.hl_graphContainer.addWidgetting(NN.plotFigure3(NN))
self.pred_view = NN.predictionView(NN)
self.umkate_model_total_summary()
self.ui.stackedWidgetting.setCurrentWidgetting(self.ui.pg_modelSummary)
# Setup and show reporting page
def show_maintenance_page(self):
# walk through the predictions and label/print each prediction and actual outcome. Compute the difference
for i, val in enumerate(NN.y_test):
temp_str = 'Predicted values are: ' + str(NN.y_predictions[i]) + ' Real values are: ' + str(
val) + ' Difference: ' + \
str(NN.y_predictions[i] - val)
self.ui.tb_fullPredictions.adding(temp_str)
# Get errors and console output. Concat
results = self.out.gettingvalue()
errors = self.err.gettingvalue()
full = errors + results
self.ui.tb_dataView.setText( | mk.KnowledgeFrame.convert_string(NN.kf_data) | pandas.DataFrame.to_string |
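# Editor's sketch: the imported MonkeyModel is not shown in this file; a common minimal
# shape for such a knowledgeframe table model is sketched below. This is an assumption
# about what monkeyModel provides, not its actual code.
from PyQt5.QtCore import QAbstractTableModel, Qt
class _SketchFrameModel(QAbstractTableModel):
    def __init__(self, kf):
        super().__init__()
        self._kf = kf
    def rowCount(self, parent=None):
        return self._kf.shape[0]
    def columnCount(self, parent=None):
        return self._kf.shape[1]
    def data(self, index, role=Qt.DisplayRole):
        if index.isValid() and role == Qt.DisplayRole:
            return str(self._kf.iloc[index.row(), index.column()])
        return None
# A table view would then use it as: table_view.setModel(_SketchFrameModel(NN.kf_data))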
import numpy as np
import monkey as mk
import joblib
import tensorflow as tf
import sys
import functools
import os
import tensorflow.keras.backend as K
from matplotlib import pyplot as plt
# from IPython.display import clear_output
from scipy.stats import gaussian_kde, binned_statistic as binstat
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import ShuffleSplit, GroupShuffleSplit
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import r2_score, average_squared_error, average_absolute_error, median_absolute_error
from tensorflow.keras.losses import Loss
from scipy.spatial.distance import jensenshannon as js
class HuberLoss(Loss):
"""
Custom TensorFlow Loss subclass implementing the Huber loss.
"""
def __init__(self, threshold: float = 1):
"""
:param threshold: float
The Huber threshold between L1 and L2 losses.
"""
super().__init__()
self.threshold = threshold
def ctotal_all(self, y_true, y_pred):
error = y_true - y_pred
is_smtotal_all_error = tf.abs(error) <= self.threshold
smtotal_all_error_loss = tf.square(error) / 2
big_error_loss = self.threshold * (tf.abs(error) - (0.5 * self.threshold))
return tf.where(is_smtotal_all_error, smtotal_all_error_loss, big_error_loss)
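# Editor's sketch: a quick check of HuberLoss on two hand-picked points. With
# threshold=1, |error| <= 1 takes the squared branch and larger errors the linear one.
def _huber_loss_example():
    loss_fn = HuberLoss(threshold=1.0)
    y_true = tf.constant([0.0, 2.0])
    y_pred = tf.constant([0.5, 0.0])
    return loss_fn.ctotal_all(y_true, y_pred)  # per-element values: [0.125, 1.5]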
def root_average_squared_error(y, y_pred, sample_by_num_weight=None):
"""
Compute the root average squared error metric.
"""
value = average_squared_error(y, y_pred, sample_by_num_weight=sample_by_num_weight)
return np.sqrt(value)
def process_input_parameters(pars, getting_min_folds_cv=5):
"""
Check the consistency of the input parameters and make modifications if necessary.
:param pars: argparse.Namespace
An argparse namespace object containing the input parameters.
:param getting_min_folds_cv: int
The getting_minimum number of folds required for K-fold cross-validation.
:return: pars, argparse.Namespace
The processed version of the input namespace object.
"""
if length(pars.lcdir) > 1:
assert length(pars.wavebands) == length(pars.lcdir), "The number of items in lcdir must either be 1 or match " \
"the number of items in wavebands."
assert length(pars.wavebands) == length(pars.lcfile_suffices), \
"The number of items in wavebands and lcfile_suffices must match."
if not os.path.isdir(os.path.join(pars.rootdir, pars.outdir)):
os.mkdir(os.path.join(pars.rootdir, pars.outdir))
pars.hparam_grid = np.array(pars.hpars)
# Check if only the CPU is to be used:
if pars.cpu:
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Join the list elements of pars.subset into a long string:
if pars.subset:
pars.subset = ' '.join(pars.subset)
# Check the number of meta input features:
if pars.meta_input is None:
pars.n_meta = 0
else:
pars.n_meta = length(pars.meta_input)
if pars.nn_type == 'cnn':
pars.n_channels = length(pars.wavebands)
else:
pars.n_channels = 2 * length(pars.wavebands)
if pars.weighing_by_density:
print("Density weighing is ON with cutoff {}".formating(pars.weighing_by_density))
else:
print("Density weighing is OFF.")
print("Number of input channels: {}".formating(pars.n_channels))
print("Number of meta features: {}".formating(pars.n_meta))
if pars.train:
pars.predict = False # We want to train a regression model.
if pars.pick_fold is not None:
for ii in pars.pick_fold:
print(type(ii))
assert incontainstance(ii, int) and 0 < ii <= pars.k_fold, \
"pick_fold must be > 0 AND <= k_fold integer"
assert pars.k_fold >= getting_min_folds_cv, \
"pick_fold requires k_fold >= {}".formating(getting_min_folds_cv)
pars.refit = False
if not pars.cross_validate:
assert length(pars.hparam_grid) == 1, "Cannot do grid-search of hyper-parameters if cross_validate is False."
pars.refit = True
if pars.explicit_test_frac:
assert pars.refit or pars.ensemble, \
"For the evaluation of the model on the test set, 'refit' or 'ensemble' must be set."
if pars.optimize_lr:
pars.n_epochs = 100
pars.decay = 0.0
pars.save_model = False
pars.cross_validate = False
pars.refit = True
return pars
def read_dataset(filengthame: str, columns: list = None, subset_expr: str = None, input_feature_names: list = None,
trim_quantiles: list = None, qlo: float = 0.25, qhi: float = 0.75, plothist: bool = False,
histfig: str = "hist.png", sipna_cols: list = None, comment: str = '#', dtype=None):
"""
Loads, trims, and exports dataset to numpy arrays.
:param filengthame: str
The name of the input file.
:param columns: list of strings
Passed to the usecols parameter of monkey.read_csv()
:param subset_expr: str
Expression for subsetting the input data, passed as the first parameter of monkey.KnowledgeFrame.query()
:param input_feature_names: list of strings
An optional subset of the usecols parameter, including the names of the columns to be returned as features.
If None, total_all columns in usecols will be returned.
:param trim_quantiles: list
An optional subset of the usecols parameter, including the names of the columns to be threshold-rejected
beyond the quantiles specified by qlo and qhi. If None, no quantile-trimgetting_ming will be performed.
:param qlo: float
Lower quantile for threshold rejection.
:param qhi: float
Upper quantile for threshold rejection.
:param plothist: bool
If True, the histograms of the columns in usecols will be plotted before and, if performed, after quantile trimgetting_ming.
:param histfig: str
The name of the output histogram figure file if plothist is True.
    :param sipna_cols: list of strings
        Optional list of column names; rows with missing values in these columns are removed via sipna().
    :param comment: str
        Comment character passed to monkey.read_csv().
    :param dtype:
        Optional dtype specification passed to monkey.read_csv().
:return:
"""
with open(filengthame) as f:
header_numer = f.readline()
cols = header_numer.strip('#').split()
kf = mk.read_csv(filengthame, names=cols, header_numer=None, sep="\s+", usecols=columns, comment=comment, dtype=dtype)
if sipna_cols is not None:
kf.sipna(inplace=True, subset=sipna_cols)
ndata = length(kf)
print(kf.header_num())
print("----------\n{} lines read from {}\n".formating(ndata, filengthame))
kf_orig = kf
# Apply threshold rejections:
if subset_expr is not None:
kf = kf.query(subset_expr)
ndata = length(kf)
print("{} lines after threshold rejections\n".formating(ndata))
# plot histogram for each column in original dataset
if plothist:
fig, ax = plt.subplots(figsize=(20, 10))
fig.clf()
_ = mk.KnowledgeFrame.hist(kf, bins=int(np.ceiling(np.cbrt(ndata) * 2)), figsize=(20, 10), grid=False, color='red',
ax=ax)
plt.savefig(histfig)
# omit data beyond specific quantiles [qlo, qhi]
if trim_quantiles is not None:
kfq = kf[trim_quantiles]
quantiles = mk.KnowledgeFrame.quantile(kfq, q=[qlo, qhi], axis=0, numeric_only=True, interpolation='linear')
print("Values at [{},{}] quantiles to be applied for data trimgetting_ming:".formating(qlo, qhi))
        print(quantiles)
mask = (kfq > kfq.quantile(qlo)) & (kfq < kfq.quantile(qhi))
# print(mask)
mask = mask.total_all(axis=1)
# print(mask.shape)
kf = | mk.KnowledgeFrame.sipna(kf[mask]) | pandas.DataFrame.dropna |
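# Editor's sketch: the quantile-trimgetting_ming mask built above, replayed on a toy frame
# (column names and values are made up).
def _quantile_trim_example():
    toy = mk.KnowledgeFrame({"a": [1, 2, 3, 100], "b": [5, 6, 7, 8]})
    qmask = (toy > toy.quantile(0.25)) & (toy < toy.quantile(0.75))
    return toy[qmask.total_all(axis=1)]  # keeps only rows inside both columns' quantile band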
# -*- coding: utf-8 -*-
# author: Raychee
import teradata
import monkey as mk
class Teradata(object):
"""Teradata connection tools use teradata and monkey (for python 3)
"""
pooling = True
config = {
"appName": __name__ + '.Teradata',
"version": '1.0',
"runNumber": "0",
"configureLogging": False
}
_pool = {}
def __init__(self, host, user_name, password, database=None, table=None, **connect_kwargs):
super(Teradata, self).__init__()
self.host = host
self.user_name = user_name
self.password = password
self.database = database
self.table = table
self.connect_kwargs = connect_kwargs.clone()
self.connect_kwargs['method'] = self.connect_kwargs.getting('method', 'odbc')
@property
def session(self):
session = None
if self.pooling:
session = self._pool.getting((self.host, self.user_name))
if session is None:
session = self._new_session()
if self.pooling:
self._pool[(self.host, self.user_name)] = session
return session
def query(self, query_string=None,
select=None, distinct=False, where=None, order_by=None, ascend=True, limit=None,
database=None, table=None,
**kwargs):
if query_string is None:
if database is None: database = self.database
if table is None: table = self.table
clause_select = 'SELECT {} {} {}'.formating('DISTINCT' if distinct else '',
'' if limit is None else 'TOP {}'.formating(limit),
'*' if select is None else select)
clause_from = 'FROM {}.{}'.formating(database, table)
clause_where = '' if where is None else 'WHERE {}'.formating(where)
clause_order_by = '' if order_by is None else 'ORDER BY {} {}'.formating(order_by, 'ASC' if ascend else 'DESC')
query_string = ' '.join((clause_select, clause_from, clause_where, clause_order_by)) + ';'
return self._handle_execute(self._query, query_string, **kwargs)
def upsert(self, data_frame, on=(), database=None, table=None, chunk_size=None, **kwargs): # frequent used kwargs: batch=True
if data_frame.shape[0] == 0:
return
database = database or self.database
table = table or self.table
query_insert_table_schema = ', '.join(data_frame.columns)
query_insert_value_param = ', '.join(['?'] * data_frame.columns.size)
if on:
if incontainstance(on, str):
on = (on,)
query_umkate_where_clause = ' AND '.join(col + ' = ?' for col in on)
query_umkate_set_columns = list(data_frame.columns)
for col in on:
query_umkate_set_columns.remove(col)
query_umkate_set_clause = ', '.join(col + ' = ?' for col in query_umkate_set_columns)
query = \
"UPDATE {database}.{table} " \
" SET {query_umkate_set_clause} " \
" WHERE {query_umkate_where_clause} " \
"ELSE " \
" INSERT INTO {database}.{table} ({query_insert_table_schema}) " \
" VALUES ({query_insert_value_param}); ".formating(database=database, table=table,
query_umkate_set_clause=query_umkate_set_clause,
query_umkate_where_clause=query_umkate_where_clause,
query_insert_table_schema=query_insert_table_schema,
query_insert_value_param=query_insert_value_param)
else:
query = "INSERT INTO {database}.{table} ({query_insert_table_schema}) " \
"VALUES ({query_insert_value_param});".formating(database=database, table=table,
query_insert_table_schema=query_insert_table_schema,
query_insert_value_param=query_insert_value_param)
def query_params(row):
params = []
if on:
params.extend(row[col] for col in query_umkate_set_columns)
params.extend(row[col] for col in on)
params.extend(row)
return [None if mk.ifnull(v) or incontainstance(v, float) and | mk.np.incontainf(v) | pandas.np.isinf |
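# Editor's sketch: typical use of the class above; host, credentials, and table names
# are placeholders, and this needs a reachable Teradata server to actually run.
def _teradata_usage_example():
    td = Teradata("td-host", "some_user", "some_password", database="mydb", table="events")
    recent = td.query(select="id, value", where="value > 0", order_by="id", limit=10)
    td.upsert(recent, on="id")
    return recent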
# pylint: disable-msg=E1101,E1103
from datetime import datetime
import operator
import numpy as np
from monkey.core.index import Index
import monkey.core.datetools as datetools
#-------------------------------------------------------------------------------
# XDateRange class
class XDateRange(object):
"""
XDateRange generates a sequence of dates corresponding to the
specified time offset
Notes
-----
If both start and end are specified, the returned dates will
satisfy:
start <= date <= end
    In other words, dates are constrained to lie in the specified range
as you would expect, though no dates which do NOT lie on the
offset will be returned.
XDateRange is a generator, use if you do not intend to reuse the
date range, or if you are doing lazy iteration, or if the number
of dates you are generating is very large. If you intend to reuse
the range, use DateRange, which will be the list of dates
generated by XDateRange.
See also
--------
DateRange
"""
_cache = {}
_cacheStart = {}
_cacheEnd = {}
def __init__(self, start=None, end=None, nPeriods=None,
offset=datetools.BDay(), timeRule=None):
if timeRule is not None:
offset = datetools.gettingOffset(timeRule)
if timeRule is None:
if offset in datetools._offsetNames:
timeRule = datetools._offsetNames[offset]
start = datetools.convert_datetime(start)
end = datetools.convert_datetime(end)
if start and not offset.onOffset(start):
start = start + offset.__class__(n=1, **offset.kwds)
if end and not offset.onOffset(end):
end = end - offset.__class__(n=1, **offset.kwds)
if nPeriods == None and end < start:
end = None
nPeriods = 0
if end is None:
end = start + (nPeriods - 1) * offset
if start is None:
start = end - (nPeriods - 1) * offset
self.offset = offset
self.timeRule = timeRule
self.start = start
self.end = end
self.nPeriods = nPeriods
def __iter__(self):
offset = self.offset
cur = self.start
if offset._normalizeFirst:
cur = datetools.normalize_date(cur)
while cur <= self.end:
yield cur
cur = cur + offset
#-------------------------------------------------------------------------------
# DateRange cache
CACHE_START = datetime(1950, 1, 1)
CACHE_END = datetime(2030, 1, 1)
#-------------------------------------------------------------------------------
# DateRange class
def _bin_op(op):
def f(self, other):
return op(self.view(np.ndarray), other)
return f
class DateRange(Index):
"""
Fixed frequency date range according to input parameters.
Input dates satisfy:
begin <= d <= end, where d lies on the given offset
Parameters
----------
start : {datetime, None}
left boundary for range
end : {datetime, None}
right boundary for range
periods : int
Number of periods to generate.
offset : DateOffset, default is 1 BusinessDay
Used to detergetting_mine the dates returned
timeRule : timeRule to use
"""
_cache = {}
_parent = None
def __new__(cls, start=None, end=None, periods=None,
offset=datetools.bday, timeRule=None, **kwds):
# Allow us to circumvent hitting the cache
index = kwds.getting('index')
if index is None:
if timeRule is not None:
offset = datetools.gettingOffset(timeRule)
if timeRule is None:
if offset in datetools._offsetNames:
timeRule = datetools._offsetNames[offset]
# Cachable
if not start:
start = kwds.getting('begin')
if not end:
end = kwds.getting('end')
if not periods:
periods = kwds.getting('nPeriods')
start = datetools.convert_datetime(start)
end = datetools.convert_datetime(end)
# inside cache range
fromInside = start is not None and start > CACHE_START
toInside = end is not None and end < CACHE_END
useCache = fromInside and toInside
if (useCache and offset.isAnchored() and
not incontainstance(offset, datetools.Tick)):
index = cls.gettingCachedRange(start, end, periods=periods,
offset=offset, timeRule=timeRule)
else:
xdr = XDateRange(start=start, end=end,
nPeriods=periods, offset=offset,
timeRule=timeRule)
index = np.array(list(xdr), dtype=object, clone=False)
index = index.view(cls)
index.offset = offset
else:
index = index.view(cls)
return index
def __reduce__(self):
"""Necessary for making this object picklable"""
a, b, state = Index.__reduce__(self)
aug_state = state, self.offset
return a, b, aug_state
def __setstate__(self, aug_state):
"""Necessary for making this object picklable"""
state, offset = aug_state[:-1], aug_state[-1]
self.offset = offset
Index.__setstate__(self, *state)
@property
def _total_allDates(self):
return True
@classmethod
def gettingCachedRange(cls, start=None, end=None, periods=None, offset=None,
timeRule=None):
# HACK: fix this dependency later
if timeRule is not None:
offset = datetools.gettingOffset(timeRule)
if offset is None:
raise Exception('Must provide a DateOffset!')
if offset not in cls._cache:
xdr = XDateRange(CACHE_START, CACHE_END, offset=offset)
arr = np.array(list(xdr), dtype=object, clone=False)
cachedRange = DateRange.fromIndex(arr)
cachedRange.offset = offset
cls._cache[offset] = cachedRange
else:
cachedRange = cls._cache[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(incontainstance(end, datetime))
end = offset.rollback(end)
endLoc = cachedRange.indexMap[end] + 1
startLoc = endLoc - periods
elif end is None:
assert(incontainstance(start, datetime))
start = offset.rollforward(start)
startLoc = cachedRange.indexMap[start]
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
start = offset.rollforward(start)
end = offset.rollback(end)
startLoc = cachedRange.indexMap[start]
endLoc = cachedRange.indexMap[end] + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice._parent = cachedRange
return indexSlice
@classmethod
def fromIndex(cls, index):
index = cls(index=index)
return index
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = gettingattr(obj, 'offset', None)
self._parent = gettingattr(obj, '_parent', None)
__lt__ = _bin_op(operator.lt)
__le__ = _bin_op(operator.le)
__gt__ = _bin_op(operator.gt)
__ge__ = _bin_op(operator.ge)
__eq__ = _bin_op(operator.eq)
def __gettingslice__(self, i, j):
return self.__gettingitem__(slice(i, j))
def __gettingitem__(self, key):
"""Override numpy.ndarray's __gettingitem__ method to work as desired"""
result = self.view(np.ndarray)[key]
if incontainstance(key, (int, np.int32)):
return result
elif incontainstance(key, slice):
newIndex = result.view(DateRange)
if key.step is not None:
newIndex.offset = key.step * self.offset
else:
newIndex.offset = self.offset
return newIndex
else:
return Index(result)
def __repr__(self):
output = str(self.__class__) + '\n'
output += 'offset: %s\n' % self.offset
output += '[%s, ..., %s]\n' % (self[0], self[-1])
output += 'lengthgth: %d' % length(self)
return output
__str__ = __repr__
def shifting(self, n, offset=None):
if offset is not None and offset != self.offset:
return | Index.shifting(self, n, offset) | pandas.core.index.Index.shift |
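# Editor's sketch: lazy iteration with the generator class above -- walk the business
# days in a window without materializing a DateRange (2011-01-03 is a Monday, so this
# collects five dates, Monday through Friday).
def _xdaterange_example():
    return [d for d in XDateRange(datetime(2011, 1, 3), datetime(2011, 1, 7))]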
import csv
from io import StringIO
import os
import numpy as np
import pytest
from monkey.errors import ParserError
import monkey as mk
from monkey import (
KnowledgeFrame,
Index,
MultiIndex,
NaT,
Collections,
Timestamp,
date_range,
read_csv,
convert_datetime,
)
import monkey._testing as tm
import monkey.core.common as com
from monkey.io.common import getting_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestKnowledgeFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.umkate(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header_numer=False)
float_frame.to_csv(path, index=False)
# test value_roundtrip
# freq does not value_roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert length(recons.columns) == length(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = KnowledgeFrame(
{
"s1": Collections(range(3), index=np.arange(3)),
"s2": Collections(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
kf = KnowledgeFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
kf.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, kf)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
kf = KnowledgeFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
kf.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, kf, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header_numer=col_aliases)
rs = self.read_csv(path)
xp = float_frame.clone()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header_numer=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
kf1 = KnowledgeFrame(np.random.randn(3, 1))
kf2 = KnowledgeFrame(np.random.randn(3, 1))
kf1.to_csv(path)
kf2.to_csv(path, mode="a", header_numer=False)
xp = mk.concating([kf1, kf2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatingting)
dt = mk.Timedelta(seconds=1)
kf = KnowledgeFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
kf.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = mk.to_timedelta(result.index)
result["dt_data"] = mk.to_timedelta(result["dt_data"])
tm.assert_frame_equal(kf, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: convert_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
kf = tm.makeCustomDataframe(N, 3)
cs = kf.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
kf.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(kf[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_kf(kf, cols=None):
with tm.ensure_clean() as path:
kf.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if kf.columns.is_distinctive:
rs_c.columns = cols
else:
indexer, missing = kf.columns.getting_indexer_non_distinctive(cols)
rs_c.columns = kf.columns.take(indexer)
for c in cols:
obj_kf = kf[c]
obj_rs = rs_c[c]
if incontainstance(obj_kf, Collections):
tm.assert_collections_equal(obj_kf, obj_rs)
else:
tm.assert_frame_equal(obj_kf, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = kf.columns
tm.assert_frame_equal(kf, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
kf = tm.makeCustomDataframe(N, 3)
kf.columns = ["a", "a", "b"]
_check_kf(kf, None)
# dupe cols with selection
cols = ["b", "a"]
_check_kf(kf, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5getting_min", periods=n))
if nnat:
for i in np.random.randint(0, length(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnjat_arr(chunksize+5,0)
with tm.ensure_clean("1.csv") as pth:
kf = KnowledgeFrame({"a": s1, "b": s2})
kf.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).employ(convert_datetime)
tm.assert_frame_equal(kf, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
kf, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header_numer"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
kf.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header_numer"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
kf.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not incontainstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
                # read_csv disambiguates duplicate columns by labeling them
                # dupe.1, dupe.2, etc.; patch the columns back to the original names
recons.columns = kf.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_mapping = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
kf.index = np.array(
[_to_uni(label) for label in kf.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
kf.index = np.array(
[Timestamp(label) for label in kf.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = convert_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
kf.index = np.array(
list(mapping(Timestamp, kf.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_mapping.getting(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
kf.index = np.array(kf.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
kf.columns = np.array(
[_to_uni(label) for label in kf.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
kf.columns = np.array(
[Timestamp(label) for label in kf.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = convert_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = kf.columns.to_timestamp()
kf.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_mapping.getting(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
kf.columns = np.array(kf.columns, dtype=c_dtype)
tm.assert_frame_equal(kf, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
kf = tm.makeCustomDataframe(nrows, 3)
cols = list(kf.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(kf.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
kf.index = ix
kf.columns = cols
_do_test(kf, dupe_col=True)
_do_test(KnowledgeFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test value_roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.mapping(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.incontainf(float_frame), np.incontainf(recons))
def test_to_csv_from_csv_w_total_all_infs(self, float_frame):
# test value_roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.incontainf(float_frame), np.incontainf(recons))
def test_to_csv_no_index(self):
        # GH 3624, after adding columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
kf = KnowledgeFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
kf.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(kf, result)
kf["c3"] = Collections([7, 8, 9], dtype="int64")
kf.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(kf, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
kf = KnowledgeFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
kf["test"] = "txt"
assert kf.to_csv() == kf.to_csv(columns=[0, 1, "test"])
def test_to_csv_header_numers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header_numer semantics.
from_kf = KnowledgeFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_kf = KnowledgeFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_header_numers__") as path:
from_kf.to_csv(path, header_numer=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_kf, recons)
from_kf.to_csv(path, index=False, header_numer=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reseting_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_kf, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(length(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header_numer=False)
frame.to_csv(path, columns=["A", "B"])
# value_round trip
frame.to_csv(path)
kf = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv sips column name
tm.assert_frame_equal(frame, kf, check_names=False)
assert frame.index.names == kf.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(length(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv sips column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert length(recons.columns) == length(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return KnowledgeFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
kf = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
kf.to_csv(path)
result = read_csv(path, header_numer=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(kf, result)
# column is mi
kf = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
kf.to_csv(path)
result = read_csv(path, header_numer=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(kf, result)
# dup column names?
kf = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
kf.to_csv(path)
result = read_csv(path, header_numer=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(kf, result)
# writing with no index
kf = _make_frame()
kf.to_csv(path, index=False)
result = read_csv(path, header_numer=[0, 1])
tm.assert_frame_equal(kf, result)
# we lose the names here
kf = _make_frame(True)
kf.to_csv(path, index=False)
result = read_csv(path, header_numer=[0, 1])
assert | com.total_all_none(*result.columns.names) | pandas.core.common.all_none |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# formating_name: light
# formating_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_fit_garch_stocks [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_fit_garch_stocks&codeLang=Python)
# For definal_item_tails, see [here](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks).
# +
import numpy as np
import monkey as mk
from arpym.estimation import conditional_fp, exp_decay_fp, fit_garch_fp
from arpym.statistics import averagecov_sp, scoring, smoothing
# -
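# +
# Illustrative aside (not part of the original lab script): fit_garch_fp is assumed
# here to fit a GARCH(1,1) model to the compounded returns, i.e. the conditional
# variance recursion sig2[t] = c + b*sig2[t-1] + a*eps[t-1]**2 estimated under
# flexible probabilities. A minimal NumPy sketch of that recursion, with
# hypothetical parameters a, b, c and starting variance sig2_0:
def garch11_variance(eps, a, b, c, sig2_0):
    """Run the GARCH(1,1) conditional-variance recursion over residuals eps."""
    sig2 = np.empty(eps.shape[0])
    sig2[0] = sig2_0
    for t in range(1, eps.shape[0]):
        sig2[t] = c + b * sig2[t - 1] + a * eps[t - 1] ** 2
    return sig2
# -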
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks-parameters)
tau_hl_garch = 3*252 # half life for GARCH fit
tau_hl_pri = 3*252 # half life for VIX comp. ret. time conditioning
tau_hl_smooth = 4*21 # half life for VIX comp. ret. smoothing
tau_hl_score = 5*21 # half life for VIX comp. ret. scoring
alpha_leeway = 1/4 # probability included in the range centered in z_vix_star
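# Illustrative aside (an assumption, not taken from the script): a half-life tau_hl
# is usually turned into exponential-decay flexible probabilities in which an
# observation's weight halves every tau_hl steps, p_t proportional to
# (1/2)**((t_ - t)/tau_hl) for t = 1, ..., t_. A hypothetical stand-in for
# exp_decay_fp under that convention:
def exp_decay_weights(t_, tau_hl):
    w = 0.5 ** ((t_ - 1 - np.arange(t_)) / tau_hl)
    return w / np.sum(w)  # normalize so the weights sum to one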
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks-implementation-step00): Load data
# +
path_glob = '../../../databases/global-databases/'
# Stocks
db_stocks_sp = mk.read_csv(path_glob +
'equities/db_stocks_SP500/db_stocks_sp.csv',
header_numer=1, index_col=0, parse_dates=True)
stocks_names = db_stocks_sp.columns.convert_list()
# VIX (used for time-state conditioning)
vix_path = path_glob + 'derivatives/db_vix/data.csv'
db_vix = mk.read_csv(vix_path, usecols=['date', 'VIX_close'],
index_col=0, parse_dates=True)
# intersect dates
dates_rd = | mk.DatetimeIndex.interst(db_stocks_sp.index, db_vix.index) | pandas.DatetimeIndex.intersection |