import pandas as pd
import os
import time
from minder_utils.configurations import config
from .format_util import iter_dir
from minder_utils.download.download import Downloader
from minder_utils.util.decorators import load_save
from minder_utils.formatting.format_tihm import format_tihm_data
import numpy as np
from minder_utils.util.util import reformat_path
from .label import label_dataframe
class Formatting:
"""
Process the raw data into a dataframe with the following columns:
patient id, device type, time, value
"""
def __init__(self, path=os.path.join('./data', 'raw_data'), add_tihm=None):
self.path = reformat_path(path)
self.add_tihm = add_tihm
self.activity_nice_locations = config['activity_nice_locations']
categories_check = ['device_types', 'homes', 'patients']
if not np.all([os.path.exists(os.path.join(path, category + '.csv')) for category in categories_check]):
print('Downloading required files for formatting')
dl = Downloader()
dl.export(categories=['device_types', 'homes', 'patients'],
reload=True, since=None, until=None, save_path=path, append=False)
print('Required files downloaded')
self.device_type = \
pd.read_csv(os.path.join(self.path, 'device_types.csv'))[['id', 'type']].set_index('id').to_dict()['type']
self.config = config
@property
@load_save(**config['physiological']['save'])
def physiological_data(self):
add_tihm = config['physiological']['add_tihm'] if self.add_tihm is None else self.add_tihm
if add_tihm:
data = self.process_data('physiological')
tihm_data = format_tihm_data()
return label_dataframe(pd.concat([data, tihm_data['physiological']]))
return label_dataframe(self.process_data('physiological').drop_duplicates())
@property
@load_save(**config['activity']['save'])
def activity_data(self):
add_tihm = config['activity']['add_tihm'] if self.add_tihm is None else self.add_tihm
if add_tihm:
data = self.process_data('activity')
tihm_data = format_tihm_data()
return label_dataframe(pd.concat([data, tihm_data['activity']]).drop_duplicates().sort_values('time'))
return label_dataframe(self.process_data('activity')).sort_values('time')
@property
@load_save(**config['environmental']['save'])
def environmental_data(self):
return label_dataframe(self.process_data('environmental'))
@property
@load_save(**config['sleep']['save'])
def sleep_data(self):
return label_dataframe(self.process_data('sleep')).sort_values('time').reset_index(drop=True)
def process_data(self, datatype):
assert datatype in ['physiological', 'activity', 'environmental', 'sleep'], 'not a valid type'
process_func = getattr(self, 'process_{}_data'.format(datatype))
dataframe = pd.DataFrame(columns=self.config[datatype]['columns'])
for name in iter_dir(self.path):
start_time = time.time()
print('Processing: {} -------> {}'.format(datatype, name).ljust(80, ' '), end='')
if name in self.config[datatype]['type']:
dataframe = process_func(name, dataframe)
end_time = time.time()
print('Finished in {:.2f} seconds'.format(end_time - start_time))
return dataframe
def process_sleep_data(self, name, df):
'''
This function will process the sleep data.
'''
col_filter = ['patient_id', 'start_date']
categorical_columns = self.config['sleep']['categorical_columns']
value_columns = self.config['sleep']['value_columns']
data_adding = pd.read_csv(os.path.join(self.path, name + '.csv'))
data_adding = data_adding[data_adding.start_date != 'start_date']
categorical_columns = [column for column in categorical_columns if column in list(data_adding.columns)]
if len(categorical_columns) != 0:
data_cat = data_adding[col_filter+categorical_columns].copy()
data_cat.replace({False: 0, True: 1}, inplace=True)
data_cat = pd.melt(data_cat.merge(
pd.get_dummies(data_cat[categorical_columns]),
left_index=True, right_index=True
).drop(categorical_columns,
axis=1),
id_vars=col_filter,
var_name='location',
value_name='value')
data_cat = data_cat[data_cat.value != 0]
data_cat = data_cat[data_cat['value'].notna()]
data_cat.value = data_cat.value.astype(float)
else:
data_cat = None
value_columns = [column for column in value_columns if column in list(data_adding.columns)]
if len(value_columns) != 0:
data_val = data_adding[col_filter+value_columns].copy()
data_val.replace({False: 0, True: 1}, inplace=True)
data_val = pd.melt(data_val,
id_vars=col_filter,
var_name='location',
value_name='value')
data_val = data_val[data_val['value'].notna()]
data_val.value = data_val.value.astype(float)
else:
data_val = None
if (data_val is None) and (data_cat is None):
return df
data_out = pd.concat([data_cat, data_val])
data_out.columns = self.config['sleep']['columns']
data_out.time = pd.to_datetime(data_out.time, utc=True)
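# A minimal usage sketch for the Formatting class above (hypothetical paths;
# assumes the minder_utils config and the raw CSV exports are in place --
# this sketch is not part of the original module):
#
#     formatter = Formatting(path=os.path.join('./data', 'raw_data'))
#     activity_df = formatter.activity_data   # labelled activity records, sorted by time
#     sleep_df = formatter.sleep_data         # labelled sleep records
#     physio_df = formatter.physiological_data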
# Previously we looked into NumPy and its ndarray object in particular. Here
# we build on that knowledge by looking at the data structures provided by the pandas library.
# Pandas is a newer package built on top of NumPy and provides an efficient
# implementation of a DataFrame.
# Here we will focus on the mechanics of using Series, DataFrame, and related structures effectively.
# We will use examples drawn from real datasets where appropriate, but these examples are not
# necessarily the focus.
import numpy as np
import pandas as pd
# Series
# A pandas Series is a one-dimensional array of indexed data.
data = pd.Series([0.25, 0.5, 0.75, 1.0])
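# A quick look at what the Series above carries: the values are stored as a
# NumPy array and the index is a pandas Index object (standard pandas behaviour).
print(data.values)   # array([0.25, 0.5 , 0.75, 1.  ])
print(data.index)    # RangeIndex(start=0, stop=4, step=1)
print(data[1])       # 0.5 -- elements are accessible by index label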
"""
Data: Temperature and Salinity time series from SIO Scripps Pier
Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m)
Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m)
- Timestamp included beginning in 1990
"""
# imports
import sys,os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
import SIO_modules as SIO_mod
from importlib import reload
reload(SIO_mod)
# read in temp and sal files
sal_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27)
temp_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26)
path_out = '/Users/MMStoll/Python/Output/Ocean569_Output/SIO_Output/'
# convert year, month, day columns to single DATE column
sal_data['DATE'] = pd.to_datetime(sal_data[['YEAR', 'MONTH', 'DAY']])
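# A sketch of the analogous conversion for the temperature file plus a common
# next step (indexing both frames by date). This assumes temp_data also carries
# YEAR/MONTH/DAY columns, which is not shown above:
temp_data['DATE'] = pd.to_datetime(temp_data[['YEAR', 'MONTH', 'DAY']])
sal_data = sal_data.set_index('DATE').sort_index()
temp_data = temp_data.set_index('DATE').sort_index()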
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
# datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
# timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
# these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
# ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
| Series([NaT, NaT], dtype="datetime64[ns]") | pandas.Series |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/14 18:19
Desc: Sina Finance - stock options
https://stock.finance.sina.com.cn/option/quotes.html
Options - CFFEX - CSI 300 index
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
Options - SSE - 50ETF
Options - SSE - 300ETF
https://stock.finance.sina.com.cn/option/quotes.html
"""
import json
import datetime
from typing import Dict, List, Tuple
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Options - CFFEX - CSI 300 index
def option_cffex_hs300_list_sina() -> Dict[str, List[str]]:
"""
    Sina Finance - CFFEX - CSI 300 index - all contracts; the first contract returned is the main contract
    Currently the CSI 300 index is the only CFFEX product covered by Sina Finance
    :return: all CFFEX CSI 300 index option contracts
:rtype: dict
"""
url = "https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
symbol = soup.find(attrs={"id": "option_symbol"}).find("li").text
temp_attr = soup.find(attrs={"id": "option_suffix"}).find_all("li")
contract = [item.text for item in temp_attr]
return {symbol: contract}
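# Usage sketch (illustration only, not part of the original API): list the
# available CFFEX CSI 300 contracts; the first code is the main contract.
# Assumes network access to the Sina endpoint above.
def _example_cffex_hs300_contracts():  # hypothetical helper name
    contracts = option_cffex_hs300_list_sina()
    product, codes = next(iter(contracts.items()))
    return codes[0]  # e.g. "io2104"; the actual codes change over time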
def option_cffex_hs300_spot_sina(symbol: str = "io2104") -> pd.DataFrame:
"""
    CFFEX - CSI 300 index - real-time quotes for the given contract
    https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
    :param symbol: contract code; list the available codes with option_cffex_hs300_list_sina
    :type symbol: str
    :return: real-time call and put quotes for the given CFFEX CSI 300 index contract
:rtype: pd.DataFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/OptionService.getOptionData"
params = {
"type": "futures",
"product": "io",
"exchange": "cffex",
"pinzhong": symbol,
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{") : data_text.rfind("}") + 1])
option_call_df = pd.DataFrame(
data_json["result"]["data"]["up"],
columns=[
"看涨合约-买量",
"看涨合约-买价",
"看涨合约-最新价",
"看涨合约-卖价",
"看涨合约-卖量",
"看涨合约-持仓量",
"看涨合约-涨跌",
"行权价",
"看涨合约-标识",
],
)
option_put_df = pd.DataFrame(
data_json["result"]["data"]["down"],
columns=[
"看跌合约-买量",
"看跌合约-买价",
"看跌合约-最新价",
"看跌合约-卖价",
"看跌合约-卖量",
"看跌合约-持仓量",
"看跌合约-涨跌",
"看跌合约-标识",
],
)
data_df = pd.concat([option_call_df, option_put_df], axis=1)
    # convert all quote columns to numeric in one pass
    numeric_columns = [
        '看涨合约-买量', '看涨合约-买价', '看涨合约-最新价', '看涨合约-卖价', '看涨合约-卖量',
        '看涨合约-持仓量', '看涨合约-涨跌', '行权价',
        '看跌合约-买量', '看跌合约-买价', '看跌合约-最新价', '看跌合约-卖价', '看跌合约-卖量',
        '看跌合约-持仓量', '看跌合约-涨跌',
    ]
    for column in numeric_columns:
        data_df[column] = pd.to_numeric(data_df[column])
return data_df
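# Usage sketch (illustration only): feed the main contract returned by
# option_cffex_hs300_list_sina into the real-time quote helper above.
# Assumes network access; the returned frame has one row per strike.
def _example_cffex_hs300_spot():  # hypothetical helper name
    contracts = option_cffex_hs300_list_sina()
    main_contract = list(contracts.values())[0][0]  # first code is the main contract
    return option_cffex_hs300_spot_sina(symbol=main_contract)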
def option_cffex_hs300_daily_sina(symbol: str = "io2202P4350") -> pd.DataFrame:
"""
    Sina Finance - CFFEX - CSI 300 index - daily quotes for the given contract
    :param symbol: full contract code (including the call/put marker); obtainable from the 标识 columns of ak.option_cffex_hs300_spot_sina
    :type symbol: str
    :return: daily data
:rtype: pd.DataFrame
"""
year = datetime.datetime.now().year
month = datetime.datetime.now().month
day = datetime.datetime.now().day
url = f"https://stock.finance.sina.com.cn/futures/api/jsonp.php/var%20_{symbol}{year}_{month}_{day}=/FutureOptionAllService.getOptionDayline"
params = {"symbol": symbol}
r = requests.get(url, params=params)
data_text = r.text
data_df = pd.DataFrame(
eval(data_text[data_text.find("[") : data_text.rfind("]") + 1])
)
data_df.columns = ["open", "high", "low", "close", "volume", "date"]
data_df = data_df[[
"date",
"open",
"high",
"low",
"close",
"volume",
]]
data_df['date'] = pd.to_datetime(data_df['date']).dt.date
data_df['open'] = pd.to_numeric(data_df['open'])
data_df['high'] = pd.to_numeric(data_df['high'])
data_df['low'] = pd.to_numeric(data_df['low'])
data_df['close'] = pd.to_numeric(data_df['close'])
data_df['volume'] = pd.to_numeric(data_df['volume'])
return data_df
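# Usage sketch (illustration only): the symbol must carry the call/put marker,
# e.g. a value from the 看涨合约-标识 / 看跌合约-标识 columns returned by
# option_cffex_hs300_spot_sina. Assumes the default contract io2202P4350 still exists.
def _example_cffex_hs300_daily():  # hypothetical helper name
    daily_df = option_cffex_hs300_daily_sina(symbol="io2202P4350")
    return daily_df.sort_values("date").tail()  # most recent trading days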
# Options - SSE - 50ETF
def option_sse_list_sina(symbol: str = "50ETF", exchange: str = "null") -> List[str]:
"""
    Sina Finance - options - SSE - 50ETF - list of contract expiry months
    https://stock.finance.sina.com.cn/option/quotes.html
    :param symbol: 50ETF or 300ETF
    :type symbol: str
    :param exchange: null
    :type exchange: str
    :return: contract expiry months
:rtype: list
"""
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getStockName"
params = {"exchange": f"{exchange}", "cate": f"{symbol}"}
r = requests.get(url, params=params)
data_json = r.json()
date_list = data_json["result"]["data"]["contractMonth"]
return ["".join(i.split("-")) for i in date_list][1:]
def option_sse_expire_day_sina(
trade_date: str = "202102", symbol: str = "50ETF", exchange: str = "null"
) -> Tuple[str, int]:
"""
    Remaining time to expiry for the given product and expiry month
    :param trade_date: expiry month, e.g. 202002, 202003, 202006, 202009
    :type trade_date: str
    :param symbol: 50ETF or 300ETF
    :type symbol: str
    :param exchange: null
    :type exchange: str
    :return: (expiry date, remaining days)
:rtype: tuple
"""
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getRemainderDay"
params = {
"exchange": f"{exchange}",
"cate": f"{symbol}",
"date": f"{trade_date[:4]}-{trade_date[4:]}",
}
r = requests.get(url, params=params)
data_json = r.json()
data = data_json["result"]["data"]
if int(data["remainderDays"]) < 0:
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getRemainderDay"
params = {
"exchange": f"{exchange}",
"cate": f"{'XD' + symbol}",
"date": f"{trade_date[:4]}-{trade_date[4:]}",
}
r = requests.get(url, params=params)
data_json = r.json()
data = data_json["result"]["data"]
return data["expireDay"], int(data["remainderDays"])
def option_sse_codes_sina(symbol: str = "看涨期权", trade_date: str = "202202", underlying: str = "510050") -> pd.DataFrame:
"""
    Shanghai Stock Exchange - codes of all call or put contracts
    :param symbol: choice of {"看涨期权", "看跌期权"}
    :type symbol: str
    :param trade_date: option expiry month, e.g. "202002"
    :type trade_date: str
    :param underlying: underlying product code; ChinaAMC SSE 50 ETF: 510050 or Huatai-PineBridge CSI 300 ETF: 510300
    :type underlying: str
    :return: codes of the call or put contracts
    :rtype: pandas.DataFrame
"""
if symbol == "看涨期权":
url = "".join(
["http://hq.sinajs.cn/list=OP_UP_", underlying, str(trade_date)[-4:]]
)
else:
url = "".join(
["http://hq.sinajs.cn/list=OP_DOWN_", underlying, str(trade_date)[-4:]]
)
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Referer': 'https://stock.finance.sina.com.cn/',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'script',
'Sec-Fetch-Mode': 'no-cors',
'Sec-Fetch-Site': 'cross-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_temp = data_text.replace('"', ",").split(",")
temp_list = [i[7:] for i in data_temp if i.startswith("CON_OP_")]
temp_df = pd.DataFrame(temp_list)
temp_df.reset_index(inplace=True)
temp_df['index'] = temp_df.index + 1
temp_df.columns = [
'序号',
'期权代码',
]
return temp_df
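# Usage sketch (illustration only): fetch every call-option code for one expiry
# month of the 50ETF; each code can be passed to the quote helpers further below.
# Assumes the requested month is still listed.
def _example_sse_codes():  # hypothetical helper name
    codes_df = option_sse_codes_sina(symbol="看涨期权", trade_date="202202", underlying="510050")
    return codes_df["期权代码"].tolist()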
def option_sse_spot_price_sina(symbol: str = "10003720") -> pd.DataFrame:
"""
    Sina Finance - options - real-time option data
    :param symbol: option code
    :type symbol: str
    :return: option price and volume data
:rtype: pandas.DataFrame
"""
url = f"http://hq.sinajs.cn/list=CON_OP_{symbol}"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Referer': 'https://stock.finance.sina.com.cn/',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'script',
'Sec-Fetch-Mode': 'no-cors',
'Sec-Fetch-Site': 'cross-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1 : data_text.rfind('"')].split(",")
field_list = [
"买量",
"买价",
"最新价",
"卖价",
"卖量",
"持仓量",
"涨幅",
"行权价",
"昨收价",
"开盘价",
"涨停价",
"跌停价",
"申卖价五",
"申卖量五",
"申卖价四",
"申卖量四",
"申卖价三",
"申卖量三",
"申卖价二",
"申卖量二",
"申卖价一",
"申卖量一",
"申买价一",
"申买量一 ",
"申买价二",
"申买量二",
"申买价三",
"申买量三",
"申买价四",
"申买量四",
"申买价五",
"申买量五",
"行情时间",
"主力合约标识",
"状态码",
"标的证券类型",
"标的股票",
"期权合约简称",
"振幅",
"最高价",
"最低价",
"成交量",
"成交额",
]
data_df = pd.DataFrame(list(zip(field_list, data_list)), columns=["字段", "值"])
return data_df
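# Usage sketch (illustration only): the helper above returns a two-column
# 字段/值 table, so single fields are easiest to read after indexing by 字段.
# Assumes the option code is still traded.
def _example_sse_spot_price():  # hypothetical helper name
    quote_df = option_sse_spot_price_sina(symbol="10003720")
    return quote_df.set_index("字段").at["最新价", "值"]  # latest traded price, as a string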
def option_sse_underlying_spot_price_sina(symbol: str = "sh510300") -> pd.DataFrame:
"""
    Real-time data for the option's underlying instrument
    :param symbol: sh510050 or sh510300
    :type symbol: str
    :return: information on the option's underlying instrument
:rtype: pandas.DataFrame
"""
url = f"http://hq.sinajs.cn/list={symbol}"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Proxy-Connection': 'keep-alive',
'Referer': 'http://vip.stock.finance.sina.com.cn/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1 : data_text.rfind('"')].split(",")
field_list = [
"证券简称",
"今日开盘价",
"昨日收盘价",
"最近成交价",
"最高成交价",
"最低成交价",
"买入价",
"卖出价",
"成交数量",
"成交金额",
"买数量一",
"买价位一",
"买数量二",
"买价位二",
"买数量三",
"买价位三",
"买数量四",
"买价位四",
"买数量五",
"买价位五",
"卖数量一",
"卖价位一",
"卖数量二",
"卖价位二",
"卖数量三",
"卖价位三",
"卖数量四",
"卖价位四",
"卖数量五",
"卖价位五",
"行情日期",
"行情时间",
"停牌状态",
]
data_df = pd.DataFrame(list(zip(field_list, data_list)), columns=["字段", "值"])
return data_df
def option_sse_greeks_sina(symbol: str = "10003045") -> pd.DataFrame:
"""
    Basic option information table (including the Greeks)
    :param symbol: contract code
    :type symbol: str
    :return: basic option information table
:rtype: pandas.DataFrame
"""
url = f"http://hq.sinajs.cn/list=CON_SO_{symbol}"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Proxy-Connection': 'keep-alive',
'Referer': 'http://vip.stock.finance.sina.com.cn/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.get(url, headers=headers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1: data_text.rfind('"')].split(",")
field_list = [
"期权合约简称",
"成交量",
"Delta",
"Gamma",
"Theta",
"Vega",
"隐含波动率",
"最高价",
"最低价",
"交易代码",
"行权价",
"最新价",
"理论价值",
]
data_df = pd.DataFrame(
list(zip(field_list, [data_list[0]] + data_list[4:])), columns=["字段", "值"]
)
return data_df
def option_sse_minute_sina(symbol: str = "10003720") -> pd.DataFrame:
"""
    Minute-level data of the given option for the current trading day; only the current trading day is available, historical minute data cannot be retrieved
    https://stock.finance.sina.com.cn/option/quotes.html
    :param symbol: option code
    :type symbol: str
    :return: minute data of the given option for the current trading day
:rtype: pandas.DataFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionDaylineService.getOptionMinline"
params = {"symbol": f"CON_OP_{symbol}"}
headers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://stock.finance.sina.com.cn/option/quotes.html',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'script',
'sec-fetch-mode': 'no-cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = data_json["result"]["data"]
data_df = pd.DataFrame(temp_df)
data_df.columns = ["时间", "价格", "成交", "持仓", "均价", "日期"]
data_df = data_df[[
"日期",
"时间",
"价格",
"成交",
"持仓",
"均价"
]]
data_df['日期'] = pd.to_datetime(data_df['日期']).dt.date
data_df['日期'].ffill(inplace=True)
data_df['价格'] = pd.to_numeric(data_df['价格'])
data_df['成交'] = pd.to_numeric(data_df['成交'])
data_df['持仓'] = pd.to_numeric(data_df['持仓'])
data_df['均价'] = pd.to_numeric(data_df['均价'])
return data_df
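# Usage sketch (illustration only): minute data only covers the current trading
# day, as noted in the docstring above. Assumes the option code is currently traded.
def _example_sse_minute():  # hypothetical helper name
    minute_df = option_sse_minute_sina(symbol="10003720")
    return minute_df.groupby("日期")["价格"].agg(["min", "max", "last"])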
def option_sse_daily_sina(symbol: str = "10003889") -> pd.DataFrame:
"""
    Daily data for the given option
    :param symbol: option code
    :type symbol: str
    :return: all daily historical data of the given option
:rtype: pandas.DataFrame
"""
url = "http://stock.finance.sina.com.cn/futures/api/jsonp_v2.php//StockOptionDaylineService.getSymbolInfo"
params = {"symbol": f"CON_OP_{symbol}"}
headers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://stock.finance.sina.com.cn/option/quotes.html',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'script',
'sec-fetch-mode': 'no-cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
}
r = requests.get(url, params=params, headers=headers)
data_text = r.text
data_json = json.loads(data_text[data_text.find("(") + 1 : data_text.rfind(")")])
temp_df = | pd.DataFrame(data_json) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os
breakout_index = 0
prev_breakout_index = 0
num_samples = 90
month = "july"
num_samples_split = 10
path = str(num_samples_split) + "_normalized_refined_lfc/"
#filename_breakout = "normalized datasets 2/normalized_breakout_" + month + ".xls"
filename_goodcast = "normalized datasets 2/normalized_goodcasts_" + month + ".xls"
#filename_aug = "normalized_breakout_aug.xls"
sample = 0
num_intervals_gc = 0
num_intervals_bo = 0
def split_goodcast(filename, num_intervals_gc):
data = pd.read_excel(filename)
data = pd.DataFrame(data)
data = data.values.tolist()
nan_index = [-1]
sizes = []
data_required = []
for i in range(0,len(data)):
if(np.isnan(data[i][0])):
nan_index += [i]
for i in range(0, len(nan_index)-1):
sizes += [nan_index[i+1] - nan_index[i] - 2]
for i in range(1,len(nan_index)):
print(i)
for index in range(nan_index[i-1]+1, nan_index[i],num_samples_split):
print(index)
if(nan_index[i] == index + 1):
continue
data_required = data[index:index+num_samples_split]
num_intervals_gc += 1
| pd.DataFrame(data_required) | pandas.DataFrame |
from BoostInference_no_parallelization import Booster
import sys, pandas as pd, numpy as np
import glob, pickle
from sklearn.metrics import roc_auc_score, precision_recall_curve, auc
if len(sys.argv)<5:
print('python file.py df-val-PhyloPGM-input df-test-PhyloPGM-output info_tree fname_df_pgm_output')
exit(0)
fname_dtrain = sys.argv[1]
fname_dtest = sys.argv[2]
info_tree = sys.argv[3]
fname_df_pgm_output = sys.argv[4]
# given_pseudo_count = float(sys.argv[3])
print('fname_dtrain:', fname_dtrain,
'fname_dtest:', fname_dtest,
# 'given_pseudo_count:', given_pseudo_count
)
dtrain = | pd.read_csv(fname_dtrain, index_col=0) | pandas.read_csv |
# Import libraries
import pandas as pd
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
from urllib.request import urlopen, Request
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# Parameters
n = 10 # the # of article headlines displayed per ticker
tickers = ['AAPL', 'TSLA', 'AMZN']
# Get Data
finwiz_url = 'https://finviz.com/quote.ashx?t='
news_tables = {}
for ticker in tickers:
url = finwiz_url + ticker
req = Request(url=url, headers={'user-agent': 'my-app/0.0.1'})
resp = urlopen(req)
html = BeautifulSoup(resp, features="lxml")
news_table = html.find(id='news-table')
news_tables[ticker] = news_table
try:
for ticker in tickers:
df = news_tables[ticker]
df_tr = df.findAll('tr')
print('\n')
print('Recent News Headlines for {}: '.format(ticker))
for i, table_row in enumerate(df_tr):
a_text = table_row.a.text
td_text = table_row.td.text
td_text = td_text.strip()
print(a_text, '(', td_text, ')')
if i == n - 1:
break
except KeyError:
pass
# Iterate through the news
parsed_news = []
for file_name, news_table in news_tables.items():
for x in news_table.findAll('tr'):
text = x.a.get_text()
date_scrape = x.td.text.split()
if len(date_scrape) == 1:
time = date_scrape[0]
else:
date = date_scrape[0]
time = date_scrape[1]
ticker = file_name.split('_')[0]
parsed_news.append([ticker, date, time, text])
# Sentiment Analysis
analyzer = SentimentIntensityAnalyzer()
columns = ['Ticker', 'Date', 'Time', 'Headline']
news = pd.DataFrame(parsed_news, columns=columns)
scores = news['Headline'].apply(analyzer.polarity_scores).tolist()
df_scores = | pd.DataFrame(scores) | pandas.DataFrame |
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
from twitter.twitter_data_model import User
import pandas as pd
def get_most_likely_author(usernames, tweet_to_classify, nlp):
vects = []
# Puts vectorized tweets in dataframe for each user
for username in usernames:
user = User.query.filter(User.name == username).one()
user_vects = pd.DataFrame([tweet.vect for tweet in user.tweets])
user_vects['name'] = username
vects.append(user_vects)
# Puts all the above together in single dataframe
df = pd.concat(vects)
# Create training sets, fit model
le = LabelEncoder()
y_train = le.fit_transform(df['name'])
X_train = df[[c for c in df.columns if not c == 'name']]
model = LogisticRegression()
model.fit(X_train, y_train)
# Predict for given tweet, return predicted class
X_pred = [nlp(tweet_to_classify).vector]
y_pred = model.predict(X_pred)[0]
return usernames[y_pred - 1]
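# Usage sketch (illustration only; the usernames are hypothetical and both a
# populated twitter database and a loaded spaCy model, e.g.
# nlp = spacy.load("en_core_web_md"), are assumed):
def _example_most_likely_author(nlp):  # hypothetical helper name
    return get_most_likely_author(["alice", "bob"], "just shipped a new rocket", nlp)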
def compare_two_tweets(user0, user1, tweet_to_classify, nlp):
vects = []
user_0 = User.query.filter(User.name == user0).one()
user_0_vects = | pd.DataFrame([tweet.vect for tweet in user_0.tweets]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category')]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category')]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([pd.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([pd.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category'),
Index(case, dtype=object)]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category'),
Series(case, dtype=object)]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_unique_index(self):
cases = [Index([1, 2, 3]), pd.RangeIndex(0, 3)]
for case in cases:
assert case.is_unique
tm.assert_numpy_array_equal(case.duplicated(),
np.array([False, False, False]))
@pytest.mark.parametrize('arr, unique', [
([(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],
[(0, 0), (0, 1), (1, 0), (1, 1)]),
([('b', 'c'), ('a', 'b'), ('a', 'b'), ('b', 'c')],
[('b', 'c'), ('a', 'b')]),
([('a', 1), ('b', 2), ('a', 3), ('a', 1)],
[('a', 1), ('b', 2), ('a', 3)]),
])
def test_unique_tuples(self, arr, unique):
# https://github.com/pandas-dev/pandas/issues/16519
expected = np.empty(len(unique), dtype=object)
expected[:] = unique
result = pd.unique(arr)
tm.assert_numpy_array_equal(result, expected)
class GroupVarTestMixin(object):
def test_group_var_generic_1d(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 1))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(15, 1).astype(self.dtype)
labels = np.tile(np.arange(5), (3, )).astype('int64')
expected_out = (np.squeeze(values)
.reshape((5, 3), order='F')
.std(axis=1, ddof=1) ** 2)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = RandomState(1234)
out = (np.nan * np.ones((1, 1))).astype(self.dtype)
counts = np.zeros(1, dtype='int64')
values = 10 * prng.rand(5, 1).astype(self.dtype)
labels = np.zeros(5, dtype='int64')
expected_out = np.array([[values.std(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.vstack([values[:, 0]
.reshape(5, 2, order='F')
.std(ddof=1, axis=1) ** 2,
np.nan * np.ones(5)]).T.astype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, check_less_precise=6)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float64
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float32
dtype = np.float32
rtol = 1e-2
class TestHashTable(object):
def test_lookup_nan(self):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
m = ht.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_lookup_overflow(self):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
m = ht.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_get_unique(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
def test_vector_resize(self):
# Test for memory errors after internal vector
# reallocations (pull request #7157)
def _test_vector_resize(htable, uniques, dtype, nvals, safely_resizes):
vals = np.array(np.random.randn(1000), dtype=dtype)
# get_labels may append to uniques
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array() set an external_view_exists flag on uniques.
tmp = uniques.to_array()
oldshape = tmp.shape
# subsequent get_labels() calls can no longer append to it
# (for all but StringHashTables + ObjectVector)
if safely_resizes:
htable.get_labels(vals, uniques, 0, -1)
else:
with pytest.raises(ValueError) as excinfo:
htable.get_labels(vals, uniques, 0, -1)
assert str(excinfo.value).startswith('external reference')
uniques.to_array() # should not raise here
assert tmp.shape == oldshape
test_cases = [
(ht.PyObjectHashTable, ht.ObjectVector, 'object', False),
(ht.StringHashTable, ht.ObjectVector, 'object', True),
(ht.Float64HashTable, ht.Float64Vector, 'float64', False),
(ht.Int64HashTable, ht.Int64Vector, 'int64', False),
(ht.UInt64HashTable, ht.UInt64Vector, 'uint64', False)]
for (tbl, vect, dtype, safely_resizes) in test_cases:
# resizing to empty is a special case
_test_vector_resize(tbl(), vect(), dtype, 0, safely_resizes)
_test_vector_resize(tbl(), vect(), dtype, 10, safely_resizes)
def test_quantile():
s = Series(np.random.randn(100))
result = algos.quantile(s, [0, .25, .5, .75, 1.])
expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
def test_unique_label_indices():
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
a[np.random.choice(len(a), 10)] = -1
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
class TestRank(object):
@td.skip_if_no_scipy
def test_scipy_compat(self):
from scipy.stats import rankdata
def _check(arr):
mask = ~np.isfinite(arr)
arr = arr.copy()
result = libalgos.rank_1d_float64(arr)
arr[mask] = np.inf
exp = rankdata(arr)
exp[mask] = nan
assert_almost_equal(result, exp)
_check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
_check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))
def test_basic(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in np.typecodes['AllInteger']:
s = Series([1, 100], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_uint64_overflow(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in [np.float64, np.uint64]:
s = Series([1, 2**63], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_too_many_ndims(self):
arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
msg = "Array with ndim > 2 are not supported"
with tm.assert_raises_regex(TypeError, msg):
algos.rank(arr)
def test_pad_backfill_object_segfault():
old = np.array([], dtype='O')
new = np.array([datetime(2010, 12, 31)], dtype='O')
result = libalgos.pad_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.pad_object(new, old)
expected = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill_object(new, old)
    expected = np.array([], dtype=np.int64)
    tm.assert_numpy_array_equal(result, expected)
# MIT License
#
# Copyright (c) 2021. <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Reference:
# https://www.pyimagesearch.com/2017/02/13/recognizing-digits-with-opencv-and-python/
# import the necessary packages
import sys
import os
import unidecode
from colorama import Fore, Style
import re
import numpy as np, cv2, imutils
import pandas as pd
from keras.models import load_model
from pdf2image import convert_from_path
from PIL import Image
from datetime import datetime
from process_copy.config import re_mat
from process_copy.config import MoodleFields as MF
from process_copy.mcc import get_name, load_csv
allowed_decimals = ['0', '25', '5', '75']
corrected_decimals = ['5', '75']  # for length 1, use the first one; for length 2, use the second one ...
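# Note: corrected_decimals appears to be indexed by the length of the recognized
# decimal part, i.e. an out-of-range 1-digit decimal is presumably corrected to '5'
# and a 2-digit one to '75', so that only values in allowed_decimals remain.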
len_mat = 7
RED = (225, 6, 0)
GREEN = (0, 154, 23)
ORANGE = (255, 127, 0)
BLACK = (0, 0, 0)
ph = 0
pw = 0
half_dpi = 0
quarter_dpi = 0
one_height_dpi = 0
def refresh(dpi=300):
global ph, pw, half_dpi, quarter_dpi, one_height_dpi
ph = int(11 * dpi)
pw = int(8.5 * dpi)
half_dpi = int(dpi / 2)
quarter_dpi = int(dpi / 4)
one_height_dpi = int(dpi / 8)
refresh()
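# The call above fills in the page-geometry globals at import time with the default
# 300 dpi; code working at another resolution can simply call refresh() again,
# e.g. refresh(150) for half-resolution scans.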
def find_matricules(paths, box, grades_csv=[], dpi=300, shape=(8.5, 11)):
shape = (int(dpi * shape[0]), int(dpi * shape[1]))
# loading our CNN model
classifier = load_model('digit_recognizer.h5')
# load csv
grades_dfs, grades_names = load_csv(grades_csv)
root_dir = None
# list files and directories
matricules_data = {}
duplicates = set()
invalid = []
for path in paths:
r = os.path.dirname(path)
if not root_dir:
root_dir = r
elif root_dir.count('/') > r.count('/'):
root_dir = r
for root, dirs, files in os.walk(path):
for f in files:
if not f.endswith('.pdf'):
continue
file = os.path.join(root, f)
if os.path.isfile(file):
grays = gray_images(file, shape=shape)
if grays is None:
print(Fore.RED + "%s: No valid pdf" % f + Style.RESET_ALL)
continue
mat, id_box, id_group = find_matricule(grays, box['front'], box['regular'], classifier, grades_dfs,
separate_box=box['separate_box'])
name = grades_dfs[id_group].at[mat, MF.name] if id_group is not None else mat
if name:
name = unidecode.unidecode(name)
if not mat:
print(Fore.RED + "No matricule found for %s" % f + Style.RESET_ALL)
else:
print("Matricule %s found for %s. Name: %s" % (mat, f, name))
m = mat if mat else "NA"
if m not in matricules_data:
matricules_data[m] = []
# if no valid matricule has been found
if m != "NA" and grades_dfs and id_group is None:
invalid.append(m)
elif m != "NA":
duplicates.add(m)
matricules_data[m].append((id_box, name, file))
sumarries = []
csvf = "Id,Matricule,NomComplet,File\n"
def add_summary(mat, id_box, name, file, invalid=False, initial_index=1):
i = len(sumarries)+initial_index
l_csv = '%d,%s,%s,%s\n' % (i, mat if mat else '', name if name else '', file)
sumarry = create_summary(id_box, name, None, None,
"%d: %s" % (i, file.rsplit('/')[-1]), dpi,
align_matricule_left=False, name_bottom=False, invalid=invalid)
sumarries.append(sumarry)
return l_csv
print(Fore.RED)
if 'NA' in matricules_data:
for id_box, name, file in matricules_data['NA']:
print("No matricule found for %s" % file)
csvf += add_summary(None, id_box, None, file)
matricules_data.pop('NA')
for m in sorted(invalid):
print("No valid matricule %s for:" % m)
for id_box, name, file in matricules_data[m]:
print(" " + file)
csvf += add_summary(m, id_box, None, file, invalid=True)
matricules_data.pop(m)
for m in sorted(duplicates):
print("Duplicate files found for matricule %s:" % m)
for id_box, name, file in matricules_data[m]:
print(" " + file)
csvf += add_summary(m, id_box, name, file, invalid=True)
matricules_data.pop(m)
print(Style.RESET_ALL)
for m in sorted(matricules_data):
if len(matricules_data[m]) != 1:
raise ValueError('The list should contain only one element associated to a given matricule (%s)' % m)
id_box, name, file = matricules_data[m][0]
csvf += add_summary(m, id_box, name, file)
# save summary pdf and grades
pages = create_whole_summary(sumarries)
save_pages(pages, os.path.join(root_dir, "matricule_summary.pdf"))
with open(os.path.join(root_dir, "matricules.csv"), 'w') as wf:
wf.write(csvf)
def grade_all(paths, grades_csv, box, id_box=None, dpi=300, shape=(8.5,11)):
shape = (int(dpi * shape[0]), int(dpi * shape[1]))
# load csv
grades_dfs, grades_names = load_csv(grades_csv)
# load max grade if available
max_grade = None
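    # The loop below keeps the smallest value seen in the "max" column across all
    # grade CSVs, so max_grade ends up as the most restrictive declared maximum,
    # presumably used as an upper bound when validating recognized grades.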
for df in grades_dfs:
for idx, row in df.iterrows():
s = row[MF.max]
if pd.isna(s):
continue
if isinstance(s, str):
s = s.replace(',', '.')
try:
s = float(s)
except:
continue
if max_grade is None or s < max_grade:
max_grade = s
# loading our CNN model
classifier = load_model('digit_recognizer.h5')
# grade files
grades_data = []
dt = get_date()
trim = box['trim'] if 'trim' in box else None
for path in paths:
for root, dirs, files in os.walk(path):
for f in files:
if not f.endswith('.pdf'):
continue
# search matricule
m = re.search(re_mat, f)
if not m:
print("Matricule wasn't found in "+f)
continue
m = m.group()
# try to recognize each grade and verify the total
file = os.path.join(root, f)
if os.path.isfile(file):
grays = gray_images(file, [0], straighten=False, shape=shape)
if grays is None:
print(Fore.RED + "%s: No valid pdf" % f + Style.RESET_ALL)
continue
gray = grays[0]
total_matched, numbers, grades = grade(gray, box['grade'],
classifier=classifier, trim=trim, max_grade=max_grade)
i, name = get_name(m, grades_dfs)
if i < 0:
print(Fore.RED + "%s: Matricule (%s) not found in csv files" % (f, m) + Style.RESET_ALL)
# fill moodle csv file
if numbers:
if pd.isna(grades_dfs[i].at[m, MF.grade]):
print("%s: %.2f" % (f, numbers[-1]))
grades_dfs[i].at[m, MF.grade] = numbers[-1]
grades_dfs[i].at[m, MF.mdate] = dt
elif grades_dfs[i].at[m, MF.grade] != numbers[-1]:
print(Fore.RED + "%s: there is already a grade (%.2f) different of %.2f" %
(f, grades_dfs[i].at[m, MF.grade], numbers[-1]) + Style.RESET_ALL)
else:
print("%s: found same grade %.2f" % (f, numbers[-1]))
else:
print(Fore.GREEN + "%s: No valid grade" % f + Style.RESET_ALL)
grades_dfs[i].at[m, MF.mdate] = dt
# Display in the summary the identity box if provided
id_img = None
if id_box:
# find the id box
cropped = fetch_box(gray, id_box['front'])
cnts = cv2.findContours(find_edges(cropped, thick=0), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
imwrite_contours("id_gray", cropped, cnts, thick=5)
# Find the biggest contour for the front box
pos, biggest_c = max(enumerate(cnts), key=lambda cnt: cv2.contourArea(cnt[1]))
id_img = get_image_from_contour(cropped, biggest_c)
grades_data.append((m, i, f, grades, numbers, total_matched, id_img))
    # check the number of files that have been dropped on moodle, if any
n = 0
for df in grades_dfs:
for idx, row in df.iterrows():
s = row[MF.status]
if pd.isna(s):
continue
if s.startswith(MF.status_start_filter):
n += 1
if n > 0 and n != len(grades_data):
print(Fore.RED + "%d copies have been uploaded on moodle, but %d have been graded" % (n, len(grades_data))
+ Style.RESET_ALL)
    # add summary
sumarries = [[] for f in grades_csv]
def add_summary(file, grades, mat, numbers, total_matched, id_group, id_img=None, initial_index=2):
lsum = sumarries[id_group]
# rename file
name = "%d: %s" % (len(lsum)+initial_index, file) # recover id box if provided
if id_img is not None:
sumarry = create_summary2(id_img, grades, mat, numbers, total_matched, name, dpi)
else:
sumarry = create_summary(grades, mat, numbers, total_matched, name, dpi)
lsum.append(sumarry)
grades_data = sorted(grades_data)
for mat, id_group, file, grades, numbers, total_matched, id_img in grades_data:
add_summary(file, grades, mat, numbers, total_matched, id_group, id_img)
# write summary
for i, f in enumerate(grades_csv):
pages = create_whole_summary(sumarries[i])
gname = f.split('.')[0]
save_pages(pages, gname + "_summary.pdf")
# store grades
df = grades_dfs[i]
        # sort by status ("Remis" first), then by matricule (the index)
        status = np.array([not pd.isna(v)
import re
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf import melt as cudf_melt
from cudf.core import DataFrame
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
)
@pytest.mark.parametrize("num_id_vars", [0, 1, 2, 10])
@pytest.mark.parametrize("num_value_vars", [0, 1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 1000])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + DATETIME_TYPES)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_melt(nulls, num_id_vars, num_value_vars, num_rows, dtype):
if dtype not in ["float32", "float64"] and nulls in ["some", "all"]:
pytest.skip(msg="nulls not supported in dtype: " + dtype)
pdf = pd.DataFrame()
id_vars = []
for i in range(num_id_vars):
colname = "id" + str(i)
data = np.random.randint(0, 26, num_rows).astype(dtype)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
elif nulls == "all":
data[:] = np.nan
pdf[colname] = data
id_vars.append(colname)
value_vars = []
for i in range(num_value_vars):
colname = "val" + str(i)
data = np.random.randint(0, 26, num_rows).astype(dtype)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
elif nulls == "all":
data[:] = np.nan
pdf[colname] = data
value_vars.append(colname)
gdf = DataFrame.from_pandas(pdf)
got = cudf_melt(frame=gdf, id_vars=id_vars, value_vars=value_vars)
got_from_melt_method = gdf.melt(id_vars=id_vars, value_vars=value_vars)
expect = pd.melt(frame=pdf, id_vars=id_vars, value_vars=value_vars)
# pandas' melt makes the 'variable' column of 'object' type (string)
# cuDF's melt makes it Categorical because it doesn't support strings
expect["variable"] = expect["variable"].astype("category")
assert_eq(expect, got)
assert_eq(expect, got_from_melt_method)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 1000])
@pytest.mark.parametrize(
"dtype",
list(NUMERIC_TYPES + DATETIME_TYPES)
+ [pytest.param("str", marks=pytest.mark.xfail())],
)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_df_stack(nulls, num_cols, num_rows, dtype):
if dtype not in ["float32", "float64"] and nulls in ["some"]:
pytest.skip(msg="nulls not supported in dtype: " + dtype)
pdf = pd.DataFrame()
for i in range(num_cols):
colname = str(i)
data = np.random.randint(0, 26, num_rows).astype(dtype)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
pdf[colname] = data
gdf = DataFrame.from_pandas(pdf)
got = gdf.stack()
expect = pdf.stack()
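    # When pandas leaves every stacked index level unnamed, the expected index names
    # are renamed to 0..n-1 below, presumably to match how cuDF labels the levels it
    # creates.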
if {None} == set(expect.index.names):
expect.rename_axis(
list(range(0, len(expect.index.names))), inplace=True
)
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 2, 10, 1000])
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize(
"dtype", NUMERIC_TYPES + DATETIME_TYPES + ["category"]
)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_interleave_columns(nulls, num_cols, num_rows, dtype):
if dtype not in ["float32", "float64"] and nulls in ["some"]:
pytest.skip(msg="nulls not supported in dtype: " + dtype)
pdf = pd.DataFrame(dtype=dtype)
for i in range(num_cols):
colname = str(i)
data = pd.Series(np.random.randint(0, 26, num_rows)).astype(dtype)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
pdf[colname] = data
gdf = DataFrame.from_pandas(pdf)
if dtype == "category":
with pytest.raises(ValueError):
assert gdf.interleave_columns()
else:
got = gdf.interleave_columns()
expect = pd.Series(np.vstack(pdf.to_numpy()).reshape((-1,))).astype(
dtype
)
assert_eq(expect, got)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 1000])
@pytest.mark.parametrize("count", [1, 2, 10])
@pytest.mark.parametrize("dtype", ALL_TYPES)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_tile(nulls, num_cols, num_rows, dtype, count):
if dtype not in ["float32", "float64"] and nulls in ["some"]:
pytest.skip(msg="nulls not supported in dtype: " + dtype)
pdf = pd.DataFrame(dtype=dtype)
for i in range(num_cols):
colname = str(i)
data = pd.Series(np.random.randint(num_cols, 26, num_rows)).astype(
dtype
)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
pdf[colname] = data
gdf = DataFrame.from_pandas(pdf)
got = gdf.tile(count)
expect = pd.DataFrame(pd.concat([pdf] * count))
assert_eq(expect, got)
def _prepare_merge_sorted_test(
size,
nparts,
keys,
add_null=False,
na_position="last",
ascending=True,
series=False,
index=False,
):
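    # Builds a cudf timeseries frame of `size` rows and splits it into `nparts`
    # individually sorted chunks, returning (full_df, list_of_chunks) so callers can
    # compare cudf.merge_sorted(chunks) against sorting the full frame directly.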
if index:
df = (
cudf.datasets.timeseries()[:size]
.reset_index(drop=False)
.set_index(keys, drop=True)
)
else:
df = cudf.datasets.timeseries()[:size].reset_index(drop=False)
if add_null:
df.iloc[1, df.columns.get_loc(keys[0])] = None
chunk = int(size / nparts)
indices = [i * chunk for i in range(0, nparts)] + [size]
if index:
dfs = [
df.iloc[indices[i] : indices[i + 1]]
.copy()
.sort_index(ascending=ascending)
for i in range(nparts)
]
elif series:
df = df[keys[0]]
dfs = [
df.iloc[indices[i] : indices[i + 1]]
.copy()
.sort_values(na_position=na_position, ascending=ascending)
for i in range(nparts)
]
else:
dfs = [
df.iloc[indices[i] : indices[i + 1]]
.copy()
.sort_values(keys, na_position=na_position, ascending=ascending)
for i in range(nparts)
]
return df, dfs
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
@pytest.mark.parametrize("keys", [None, ["id"], ["name", "timestamp"]])
@pytest.mark.parametrize("nparts", [2, 10])
def test_df_merge_sorted(nparts, keys, na_position, ascending):
size = 100
keys_1 = keys or ["timestamp"]
# Null values NOT currently supported with Categorical data
# or when `ascending=False`
    add_null = keys_1[0] not in ("name",)
df, dfs = _prepare_merge_sorted_test(
size,
nparts,
keys_1,
add_null=add_null,
na_position=na_position,
ascending=ascending,
)
expect = df.sort_values(
keys_1, na_position=na_position, ascending=ascending
)
result = cudf.merge_sorted(
dfs, keys=keys, na_position=na_position, ascending=ascending
)
if keys:
expect = expect[keys]
result = result[keys]
assert expect.index.dtype == result.index.dtype
assert_eq(expect.reset_index(drop=True), result.reset_index(drop=True))
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("index", ["id", "x"])
@pytest.mark.parametrize("nparts", [2, 10])
def test_df_merge_sorted_index(nparts, index, ascending):
size = 100
df, dfs = _prepare_merge_sorted_test(
size, nparts, index, ascending=ascending, index=True
)
expect = df.sort_index(ascending=ascending)
result = cudf.merge_sorted(dfs, by_index=True, ascending=ascending)
assert_eq(expect.index, result.index)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
@pytest.mark.parametrize("keys", [None, ["name", "timestamp"]])
def test_df_merge_sorted_ignore_index(keys, na_position, ascending):
size = 100
nparts = 3
keys_1 = keys or ["timestamp"]
# Null values NOT currently supported with Categorical data
# or when `ascending=False`
    add_null = keys_1[0] not in ("name",)
df, dfs = _prepare_merge_sorted_test(
size,
nparts,
keys_1,
add_null=add_null,
na_position=na_position,
ascending=ascending,
)
expect = df.sort_values(
keys_1, na_position=na_position, ascending=ascending
)
result = cudf.merge_sorted(
dfs,
keys=keys,
na_position=na_position,
ascending=ascending,
ignore_index=True,
)
if keys:
expect = expect[keys]
result = result[keys]
assert_eq(expect.reset_index(drop=True), result)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
@pytest.mark.parametrize("key", ["id", "name", "timestamp"])
@pytest.mark.parametrize("nparts", [2, 10])
def test_series_merge_sorted(nparts, key, na_position, ascending):
size = 100
df, dfs = _prepare_merge_sorted_test(
size,
nparts,
[key],
na_position=na_position,
ascending=ascending,
series=True,
)
expect = df.sort_values(na_position=na_position, ascending=ascending)
result = cudf.merge_sorted(
dfs, na_position=na_position, ascending=ascending
)
assert_eq(expect.reset_index(drop=True), result.reset_index(drop=True))
@pytest.mark.parametrize(
"index, column, data",
[
([], [], []),
([0], [0], [0]),
([0, 0], [0, 1], [1, 2.0]),
([0, 1], [0, 0], [1, 2.0]),
([0, 1], [0, 1], [1, 2.0]),
(["a", "a", "b", "b"], ["c", "d", "c", "d"], [1, 2, 3, 4]),
(
["a", "a", "b", "b", "a"],
["c", "d", "c", "d", "e"],
[1, 2, 3, 4, 5],
),
],
)
def test_pivot_simple(index, column, data):
pdf = pd.DataFrame({"index": index, "column": column, "data": data})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.pivot("index", "column")
got = gdf.pivot("index", "column")
check_index_and_columns = expect.shape != (0, 0)
assert_eq(
expect,
got,
check_dtype=False,
check_index_type=check_index_and_columns,
check_column_type=check_index_and_columns,
)
def test_pivot_multi_values():
# from Pandas docs:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.pivot.html
pdf = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
gdf = cudf.from_pandas(pdf)
assert_eq(
pdf.pivot(index="foo", columns="bar", values=["baz", "zoo"]),
gdf.pivot(index="foo", columns="bar", values=["baz", "zoo"]),
check_dtype=False,
)
@pytest.mark.parametrize(
"level",
[
0,
1,
2,
"foo",
"bar",
"baz",
[],
[0, 1],
["foo"],
["foo", "bar"],
pytest.param(
[0, 1, 2],
marks=pytest.mark.xfail(reason="Pandas behaviour unclear"),
),
pytest.param(
["foo", "bar", "baz"],
marks=pytest.mark.xfail(reason="Pandas behaviour unclear"),
),
],
)
def test_unstack_multiindex(level):
pdf = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
).set_index(["foo", "bar", "baz"])
gdf = cudf.from_pandas(pdf)
assert_eq(
pdf.unstack(level=level), gdf.unstack(level=level), check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[{"A": [1.0, 2.0, 3.0, 4.0, 5.0], "B": [11.0, 12.0, 13.0, 14.0, 15.0]}],
)
@pytest.mark.parametrize(
"index",
[
pd.Index(range(0, 5), name=None),
pd.Index(range(0, 5), name="row_index"),
],
)
@pytest.mark.parametrize(
"col_idx",
[
pd.Index(["a", "b"], name=None),
pd.Index(["a", "b"], name="col_index"),
| pd.MultiIndex.from_tuples([("c", 1), ("c", 2)], names=[None, None]) | pandas.MultiIndex.from_tuples |
"""Test attributing simple impact."""
import numpy as np
import pandas as pd
import pytest
from nbaspa.data.endpoints.pbp import EventTypes
from nbaspa.player_rating.tasks import SimplePlayerImpact
@pytest.mark.parametrize(
"evt",
[
EventTypes.REBOUND,
EventTypes.FREE_THROW,
EventTypes.VIOLATION,
EventTypes.FIELD_GOAL_MISSED
]
)
def test_basic_impact(evt):
"""Test attributing simple impact."""
df = pd.DataFrame(
{
"EVENTMSGTYPE": evt,
"NBA_WIN_PROB_CHANGE": [0.1, 0.1],
"HOMEDESCRIPTION": ["DESCRIPTION", None],
"VISITORDESCRIPTION": [None, "DESCRIPTION"],
"PLAYER1_ID": [123, 456],
"PLAYER2_ID": 0,
"HOME_TEAM_ID": [161, 161],
"VISITOR_TEAM_ID": [162, 162],
"SHOT_VALUE": np.nan,
"HOME_OFF_RATING": 100,
"VISITOR_OFF_RATING": 100,
"TIME": [1, 2]
}
)
tsk = SimplePlayerImpact()
output = tsk.run(pbp=df, mode="nba")
assert output["PLAYER1_IMPACT"].equals(pd.Series([0.1, -0.1]))
assert output["PLAYER2_IMPACT"].equals(pd.Series([0.0, 0.0]))
assert output["PLAYER3_IMPACT"].equals( | pd.Series([0.0, 0.0]) | pandas.Series |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        # ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        # ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
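        # e.g. nat_series_dtype_timestamp - NaT below stays datetime64, because the
        # bare NaT is treated as a timedelta rather than a datetime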
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
            # TODO: the reversed comparison (scalar on the left-hand side) is not
            # exercised here; see the commented-out block below.
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
        tm.assert_series_equal(left.le(right), left <= right)
        tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
            tm.assert_series_equal(left.le(right, axis=axis), left <= right)
            tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
        msg = 'No axis named 1 for object type'
        for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
        # TODO: Fix this exception (see GH5035)
        # (previously this was a TypeError because series returned
        # NotImplemented)
# this is an alignment issue; these are equivalent
# https://github.com/pydata/pandas/issues/5284
self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
self.assertRaises(ValueError, tester, s, d)
        # this is wrong as it's not a boolean result
# result = d.__and__(s,axis='index')
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = Series(self.ts.values[:-5] + int_ts.values,
index=self.ts.index[:-5], name='ts')
self.assert_series_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_arith_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
exp = pd.Series([3.0, 4.0, np.nan, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 + s2, exp)
tm.assert_series_equal(s2 + s1, exp)
exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
# different length
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
exp = pd.Series([3, 4, 5, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 + s4, exp)
tm.assert_series_equal(s4 + s3, exp)
exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with tm.assertRaisesRegexp(ValueError, msg):
l == r
with tm.assertRaisesRegexp(ValueError, msg):
l != r
with tm.assertRaisesRegexp(ValueError, msg):
l < r
msg = "Can only compare identically-labeled DataFrame objects"
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() == r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() != r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() < r.to_frame()
def test_bool_ops_df_compat(self):
# GH 1134
s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
exp = pd.Series([True, False, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 & s2, exp)
tm.assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 | s2, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s2 | s1, exp)
# DataFrame doesn't fill nan with False
exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
# different length
s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
exp = pd.Series([True, False, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 & s4, exp)
tm.assert_series_equal(s4 & s3, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 | s4, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, True],
index=list('ABCD'), name='x')
tm.assert_series_equal(s4 | s3, exp)
exp = pd.DataFrame({'x': [True, False, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
def test_series_frame_radd_bug(self):
# GH 353
vals = Series(tm.rands_array(5, 10))
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals': vals})
result = 'foo_' + frame
expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
with tm.assertRaises(TypeError):
datetime.now() + self.ts
with tm.assertRaises(TypeError):
self.ts + datetime.now()
def test_series_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = Series(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([2, 3, 4], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + 1
tm.assert_series_equal(res, exp)
res = np.nan + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + np.nan
tm.assert_series_equal(res, exp)
s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')], dtype=dtype)
exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),
pd.Timedelta('6 days')])
tm.assert_series_equal(pd.Timedelta('3 days') + s, exp)
tm.assert_series_equal(s + pd.Timedelta('3 days'), exp)
s = pd.Series(['x', np.nan, 'x'])
tm.assert_series_equal('a' + s, pd.Series(['ax', np.nan, 'ax']))
tm.assert_series_equal(s + 'a', pd.Series(['xa', np.nan, 'xa']))
def test_frame_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = DataFrame(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([2, 3, 4], dtype=dtype)
            tm.assert_frame_equal(res, exp)
import os
import pytz
import logging
import pymongo
import multiprocessing
import pandas as pd
from datetime import datetime
from collections import Counter, defaultdict
from typing import List, Set, Tuple
# For non-docker use, change to your url (e.g., localhost:27017)
MONGO_URL = "mongodb://localhost:27017"
CACHE_DIR = "cache/"
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
def get_data() -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Returns (projects, libraries, migrations, rules, dep_changes).
    This function should be used to get the required data for analysis,
to avoid data scope inconsistencies in different analysis modules.
"""
projects = select_projects_from_libraries_io()
libraries = select_libraries()
migrations = select_migrations()
lib_names = set(libraries["name"])
rules = select_rules(lib_names)
dep_changes = select_dependency_changes_all(lib_names)
migrations.startCommitTime = pd.to_datetime(migrations.startCommitTime, utc=True)
migrations.endCommitTime = pd.to_datetime(migrations.endCommitTime, utc=True)
return projects, libraries, migrations, rules, dep_changes
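# Hedged usage sketch (not part of the original module): get_data() is meant
# to be called once and its tuple unpacked, so every analysis module works on
# the same data scope. Assumes a reachable MongoDB at MONGO_URL and the cache
# files referenced above.
def _example_get_data_usage():
    projects, libraries, migrations, rules, dep_changes = get_data()
    print(f"{len(projects)} projects, {len(libraries)} libraries, "
          f"{len(migrations)} migrations, {len(rules)} rules, "
          f"{len(dep_changes)} dependency changes")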
def select_projects_from_libraries_io() -> pd.DataFrame:
"""Select a project dataframe as our research subject"""
if os.path.exists(os.path.join(CACHE_DIR, "projects.csv")):
return pd.read_csv(os.path.join(CACHE_DIR, "projects.csv"))
db = pymongo.MongoClient(MONGO_URL).migration_helper
projects = pd.DataFrame(list(db.lioRepository.find({
"hostType": "GitHub",
"fork": False,
"language": "Java",
"starsCount": {"$gt": 10},
})))
logging.debug(
f"{len(projects)} non-fork GitHub Java projects with stars > 10")
name_to_pom_count = Counter()
name_to_pom_commits = defaultdict(set)
for seq in db.wocDepSeq3.find():
name = seq["repoName"].replace("_", "/")
if len(seq["seq"]) >= 1:
name_to_pom_count[name] += 1
for item in seq["seq"]:
name_to_pom_commits[name].add(item["commit"])
projects = projects[projects["nameWithOwner"].isin(
name_to_pom_count.keys())]
projects["pomFilesCount"] = projects["nameWithOwner"].map(
lambda n: name_to_pom_count[n])
projects["pomFileModifyingCommitsCount"] = projects["nameWithOwner"].map(
lambda n: len(name_to_pom_commits[n]))
logging.debug(
f"{len(projects)} non-fork GitHub Java projects with stars > 10 and one pom.xml file")
projects["commitsCount"] = projects["_id"].map(
lambda i: len(db.wocRepository.find_one({"_id": i})["commits"]))
projects.to_csv(os.path.join(CACHE_DIR, "projects.csv"), index=False)
return projects
def select_libraries_from_libraries_io() -> pd.DataFrame:
"""Select a library dataframe as our research subject"""
db = pymongo.MongoClient(MONGO_URL).migration_helper
libraries = pd.DataFrame(list(db.lioProject.find({
"platform": "Maven",
"dependentRepositoriesCount": {"$gt": 10}
})))
logging.debug(
f"{len(libraries)} libraries with dependent repository count > 10")
return libraries
def select_libraries() -> pd.DataFrame:
"""Only keep libraries that has been added more than 10 times in our repository dataset"""
if os.path.exists(os.path.join(CACHE_DIR, "libraries.csv")):
return pd.read_csv(os.path.join(CACHE_DIR, "libraries.csv"))
libraries = select_libraries_from_libraries_io()
dep_changes = select_dependency_changes_all()
added_projects = defaultdict(set)
for idx, chg in dep_changes[dep_changes["type"] == "add"].iterrows():
added_projects[chg["lib2"]].add(chg["project"])
libraries["addedProjects"] = libraries["name"].map(lambda x: len(added_projects[x]))
libraries["versionsCount"] = libraries["name"].map(lambda x: len(select_library_versions(x)))
libraries = libraries[libraries.addedProjects > 10].copy()
libraries.to_csv(os.path.join(CACHE_DIR, "libraries.csv"), index=False)
return libraries
def select_rules(valid_libs: Set[str]) -> pd.DataFrame:
"""Select a migration rule dataframe as our research subject"""
    rules = pd.read_excel("data/rules.xlsx", engine="openpyxl")
import os
from multiprocessing import Value, context
import pandas as pd
import socket
import threading
import multiprocessing
import re
from contextlib import contextmanager
from pathlib import Path
import json
from portalocker import RLock, AlreadyLocked
import shutil
import pytest
from aljpy import humanhash
from fnmatch import fnmatch
import uuid
from . import tests
from logging import getLogger
log = getLogger(__name__)
ROOT = 'output/pavlov'
### Basic file stuff
def root():
root = Path(ROOT)
if not root.exists():
root.mkdir(exist_ok=True, parents=True)
return root
def path(run, res=True):
if res:
run = resolve(run)
return root() / run
def delete(run):
assert run != ''
shutil.rmtree(path(run))
@contextmanager
def lock(run, res=True):
# It's tempting to lock on the _info.json file, since that's where
# all the run state is kept. But that leads to some confusion about
# how to handle race conditions when *creating* the _info.json file,
# and also about how to handle global operations that aren't exclusively
# about that file.
#
# Better to just lock on a purpose-made lock file.
p = path(run, res)
if not p.exists():
raise ValueError('Can\'t take lock as run doesn\'t exist')
with RLock(p / '_lock'):
yield
### Info file stuff
def infopath(run, res=True):
return path(run, res) / '_info.json'
def info(run, res=True):
with lock(run, res):
path = infopath(run, res)
if not path.exists():
raise ValueError(f'Run "{run}" info file has not been created yet')
return json.loads(path.read_text())
def new_info(run, val={}, res=True):
path = infopath(run, res)
path.parent.mkdir(exist_ok=True, parents=True)
with lock(run, res):
if path.exists():
raise ValueError('Info file already exists')
if not isinstance(val, dict):
raise ValueError('Info value must be a dict')
path.write_text(json.dumps(val))
return path
@contextmanager
def update(run):
global _cache
with lock(run):
path = infopath(run)
i = json.loads(path.read_text())
yield i
path.write_text(json.dumps(i))
# Invalidate the cache
_cache = {}
### Run stuff
def new_name(suffix='', now=None):
now = (now or tests.timestamp()).strftime('%Y-%m-%d %H-%M-%S')
hash = humanhash(str(uuid.uuid4()), n=2)
return f'{now} {hash} {suffix}'.strip()
def new_run(suffix='', **kwargs):
now = tests.timestamp()
run = new_name(suffix, now)
kwargs = {**kwargs,
'_created': str(now),
'_host': socket.gethostname(),
'_files': {},
'_env': dict(os.environ)}
log.info(f'Created run {run}')
new_info(run, kwargs, res=False)
return run
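# Hedged usage sketch (not part of the original module): a run is created
# once, then its info file is only mutated through the update() context
# manager so the lock is held for the whole read-modify-write cycle.
def _example_run_lifecycle():
    run = new_run('demo', description='illustrative only')
    with update(run) as i:
        i['status'] = 'running'
    print(info(run)['status'])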
_cache = {}
def runs(name=None, **kwargs):
if name is not None or kwargs:
res = set(resolutions(name, **kwargs))
return {k: v for k, v in runs().items() if k in res}
global _cache
cache = {}
for dir in root().iterdir():
if dir.name in _cache:
cache[dir.name] = _cache[dir.name]
else:
try:
cache[dir.name] = info(dir.name, res=False)
except ValueError:
# We'll end up here if the run's dir has been created, but
# not the info file. That usually happens if we create a
# run in another process.
pass
order = sorted(cache, key=lambda n: cache[n]['_created'])
_cache = {n: cache[n] for n in order}
return _cache
def pandas(name=None, **kwargs):
df = {}
for run, info in runs(name, **kwargs).items():
df[run] = {k: v for k, v in info.items()}
df = pd.DataFrame.from_dict(df, orient='index')
if '_created' in df:
        df['_created'] = pd.to_datetime(df['_created'])
import json
import numpy as np
import pandas as pd
import requests
def get_state_fips_codes():
"""
Returns dataframe of state FIPS codes and state names
from the BLS JT series reference
"""
url = "https://download.bls.gov/pub/time.series/jt/jt.state"
data = requests.get(url)
data_fmt = data.content.decode("utf-8").split("\r\n")
df = pd.DataFrame(
[x.split("\t") for x in data_fmt[1:-1]],
columns=data_fmt[0].split("\t"),
).loc[:, ["state_code", "state_text"]]
return df
def construct_jolts_id(
prefix="JT",
sa="S",
industry="000000",
state="00",
area="00000",
size_class="00",
element="QU",
rate_level="R",
):
"""Helper function for constructing a JOLTS ID for API requests"""
return (
prefix
+ sa
+ industry
+ state
+ area
+ size_class
+ element
+ rate_level
)
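# Illustrative sketch (not part of the original module): the default arguments
# compose, piece by piece, the ID of the seasonally adjusted national quits
# rate series.
def _example_construct_jolts_id():
    assert construct_jolts_id() == "JTS000000000000000QUR"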
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx : min(ndx + n, l)]
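# Illustrative sketch (not part of the original module): batch() yields
# successive slices of at most n items; build_jolts_dataframe below uses it to
# send series IDs to the BLS API in chunks of 20.
def _example_batch():
    assert list(batch([1, 2, 3, 4, 5], n=2)) == [[1, 2], [3, 4], [5]]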
def build_jolts_dataframe(
registration_key,
element="QU",
rate_level="R",
sa="S",
industry="000000",
start_year=2018,
end_year=2022,
name="Quit Rate (Seasonally Adjusted)",
annual=True,
):
"""Download JOLTS Data from the BLS API
Parameters
----------
registration_key : str
BLS API registration key
element : str
Element of JOLTS Survey to download
rate_level : str
Rate ("R") or Level ("L")
sa : str
Seasonally Adjusted ("S") or not ("U")
industry : str
Industry code (e.g. "000000" for all industries)
start_year : int
Starting year of JOLTS data to download
end_year : int
Ending year of JOLTS data to download
name : str
Name to give to the downloaded series
annual : bool
If true, include annual estimates
Returns
-------
pandas.DaraFrame
DataFrame of specified JOLTS series with columns for
FIPS code, state name, year, date, name, footnotes, series id
"""
fips = get_state_fips_codes()
codes = [
construct_jolts_id(
state=x,
element=element,
rate_level=rate_level,
sa=sa,
industry=industry,
)
for x in fips["state_code"]
]
codes_iter = batch(codes, n=20)
# API Call to BLS
response_series = []
for z in codes_iter:
headers = {"Content-type": "application/json"}
payload = {
"seriesid": z,
"startyear": f"{start_year}",
"endyear": f"{end_year}",
}
payload.update({"registrationKey": registration_key})
if annual:
payload.update({"annualaverage": "true"})
payload = json.dumps(payload)
response = requests.post(
"https://api.bls.gov/publicAPI/v2/timeseries/data/",
data=payload,
headers=headers,
)
response.raise_for_status()
response_series.extend(response.json()["Results"]["series"])
# Parse Response into DataFrame
dfs = []
    # Build one DataFrame per series returned by the API and collect them
for s in response_series:
series_id = s["seriesID"]
data = s["data"]
data = pd.DataFrame(data)
data["series"] = series_id
dfs.append(data)
    df_full = pd.concat(dfs)
"""
Msgpack serializer support for reading and writing pandas data structures
to disk
portions of msgpack_numpy package, by <NAME> were incorporated
into this module (and tests_packers.py)
License
=======
Copyright (c) 2013, <NAME>.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of <NAME> nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import date, datetime, timedelta
from io import BytesIO
import os
from textwrap import dedent
import warnings
from dateutil.parser import parse
import numpy as np
from pandas.errors import PerformanceWarning
from pandas.util._move import (
BadMove as _BadMove, move_into_mutable_buffer as _move_into_mutable_buffer)
from pandas.core.dtypes.common import (
is_categorical_dtype, is_datetime64tz_dtype, is_object_dtype,
needs_i8_conversion, pandas_dtype)
from pandas import ( # noqa:F401
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Float64Index,
Index, Int64Index, Interval, IntervalIndex, MultiIndex, NaT, Panel, Period,
PeriodIndex, RangeIndex, Series, TimedeltaIndex, Timestamp)
from pandas.core import internals
from pandas.core.arrays import DatetimeArray, IntervalArray, PeriodArray
from pandas.core.arrays.sparse import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.core.internals import BlockManager, _safe_reshape, make_block
from pandas.core.sparse.api import SparseDataFrame, SparseSeries
from pandas.io.common import _stringify_path, get_filepath_or_buffer
from pandas.io.msgpack import ExtType, Packer as _Packer, Unpacker as _Unpacker
# check which compression libs we have installed
try:
import zlib
def _check_zlib():
pass
except ImportError:
def _check_zlib():
raise ImportError('zlib is not installed')
_check_zlib.__doc__ = dedent(
"""\
Check if zlib is installed.
Raises
------
ImportError
Raised when zlib is not installed.
""",
)
try:
import blosc
def _check_blosc():
pass
except ImportError:
def _check_blosc():
raise ImportError('blosc is not installed')
_check_blosc.__doc__ = dedent(
"""\
Check if blosc is installed.
Raises
------
ImportError
Raised when blosc is not installed.
""",
)
# until we can pass this into our conversion functions,
# this is pretty hacky
compressor = None
def to_msgpack(path_or_buf, *args, **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, buffer-like, or None
if None, return generated string
args : an object or objects to serialize
encoding : encoding for unicode objects
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
"""
global compressor
compressor = kwargs.pop('compress', None)
append = kwargs.pop('append', None)
if append:
mode = 'a+b'
else:
mode = 'wb'
def writer(fh):
for a in args:
fh.write(pack(a, **kwargs))
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
with open(path_or_buf, mode) as fh:
writer(fh)
elif path_or_buf is None:
buf = BytesIO()
writer(buf)
return buf.getvalue()
else:
writer(path_or_buf)
def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, BytesIO like or string
encoding : Encoding for decoding msgpack str type
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : same type as object stored in file
"""
path_or_buf, _, _, should_close = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
def read(fh):
unpacked_obj = list(unpack(fh, encoding=encoding, **kwargs))
if len(unpacked_obj) == 1:
return unpacked_obj[0]
if should_close:
try:
path_or_buf.close()
except IOError:
pass
return unpacked_obj
# see if we have an actual file
if isinstance(path_or_buf, str):
try:
exists = os.path.exists(path_or_buf)
except (TypeError, ValueError):
exists = False
if exists:
with open(path_or_buf, 'rb') as fh:
return read(fh)
if isinstance(path_or_buf, bytes):
# treat as a binary-like
fh = None
try:
fh = BytesIO(path_or_buf)
return read(fh)
finally:
if fh is not None:
fh.close()
elif hasattr(path_or_buf, 'read') and callable(path_or_buf.read):
# treat as a buffer like
return read(path_or_buf)
raise ValueError('path_or_buf needs to be a string file path or file-like')
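# Hedged usage sketch (not part of the original module): to_msgpack with
# path_or_buf=None returns the packed bytes, which read_msgpack accepts
# directly, so an in-memory round trip looks like this.
def _example_msgpack_roundtrip():
    df = DataFrame({'a': [1, 2, 3]})
    packed = to_msgpack(None, df, compress='zlib')
    return read_msgpack(packed)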
dtype_dict = {21: np.dtype('M8[ns]'),
'datetime64[ns]': np.dtype('M8[ns]'),
'datetime64[us]': np.dtype('M8[us]'),
22: np.dtype('m8[ns]'),
'timedelta64[ns]': np.dtype('m8[ns]'),
'timedelta64[us]': np.dtype('m8[us]'),
# this is platform int, which we need to remap to np.int64
# for compat on windows platforms
7: np.dtype('int64'),
'category': 'category'
}
def dtype_for(t):
""" return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict.get(t, t)
c2f_dict = {'complex': np.float64,
'complex128': np.float64,
'complex64': np.float32}
# windows (32 bit) compat
if hasattr(np, 'float128'):
c2f_dict['complex256'] = np.float128
def c2f(r, i, ctype_name):
"""
Convert strings to complex number instance with specified numpy type.
"""
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
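# Illustrative sketch (not part of the original module): dtype_for() maps the
# serialized dtype tokens back to numpy dtypes, and c2f() rebuilds a complex
# scalar from the two string halves stored by the encoder.
def _example_dtype_helpers():
    assert dtype_for('datetime64[ns]') == np.dtype('M8[ns]')
    assert c2f('1.0', '2.0', 'complex128') == np.complex128(1 + 2j)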
def convert(values):
""" convert the numpy values to a list """
dtype = values.dtype
if is_categorical_dtype(values):
return values
elif is_object_dtype(dtype):
return values.ravel().tolist()
if needs_i8_conversion(dtype):
values = values.view('i8')
v = values.ravel()
if compressor == 'zlib':
_check_zlib()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, zlib.compress(v))
elif compressor == 'blosc':
_check_blosc()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))
# ndarray (on original dtype)
return ExtType(0, v.tostring())
def unconvert(values, dtype, compress=None):
as_is_ext = isinstance(values, ExtType) and values.code == 0
if as_is_ext:
values = values.data
if is_categorical_dtype(dtype):
return values
elif is_object_dtype(dtype):
return np.array(values, dtype=object)
dtype = pandas_dtype(dtype).base
if not as_is_ext:
values = values.encode('latin1')
if compress:
if compress == 'zlib':
_check_zlib()
decompress = zlib.decompress
elif compress == 'blosc':
_check_blosc()
decompress = blosc.decompress
else:
raise ValueError("compress must be one of 'zlib' or 'blosc'")
try:
return np.frombuffer(
_move_into_mutable_buffer(decompress(values)),
dtype=dtype,
)
except _BadMove as e:
# Pull the decompressed data off of the `_BadMove` exception.
# We don't just store this in the locals because we want to
# minimize the risk of giving users access to a `bytes` object
# whose data is also given to a mutable buffer.
values = e.args[0]
if len(values) > 1:
# The empty string and single characters are memoized in many
# string creating functions in the capi. This case should not
# warn even though we need to make a copy because we are only
# copying at most 1 byte.
warnings.warn(
'copying data after decompressing; this may mean that'
' decompress is caching its result',
PerformanceWarning,
)
# fall through to copying `np.fromstring`
# Copy the bytes into a numpy array.
buf = np.frombuffer(values, dtype=dtype)
buf = buf.copy() # required to not mutate the original data
buf.flags.writeable = True
return buf
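# Hedged sketch (not part of the original module): convert()/unconvert() are
# the low-level value codecs used by encode()/decode(); with the module-level
# `compressor` left at None a round trip reduces to a raw-bytes copy.
def _example_convert_roundtrip():
    values = np.arange(4, dtype='int64')
    ext = convert(values)              # ExtType(0, <raw bytes>)
    restored = unconvert(ext, 'int64')
    assert (restored == values).all()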
def encode(obj):
"""
Data encoder
"""
tobj = type(obj)
if isinstance(obj, Index):
if isinstance(obj, RangeIndex):
return {'typ': 'range_index',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'start': getattr(obj, '_start', None),
'stop': getattr(obj, '_stop', None),
'step': getattr(obj, '_step', None)}
elif isinstance(obj, PeriodIndex):
return {'typ': 'period_index',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'freq': getattr(obj, 'freqstr', None),
'dtype': obj.dtype.name,
'data': convert(obj.asi8),
'compress': compressor}
elif isinstance(obj, DatetimeIndex):
tz = getattr(obj, 'tz', None)
# store tz info and data as UTC
if tz is not None:
tz = tz.zone
obj = obj.tz_convert('UTC')
return {'typ': 'datetime_index',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'dtype': obj.dtype.name,
'data': convert(obj.asi8),
'freq': getattr(obj, 'freqstr', None),
'tz': tz,
'compress': compressor}
elif isinstance(obj, (IntervalIndex, IntervalArray)):
if isinstance(obj, IntervalIndex):
typ = 'interval_index'
else:
typ = 'interval_array'
return {'typ': typ,
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'left': getattr(obj, 'left', None),
'right': getattr(obj, 'right', None),
'closed': getattr(obj, 'closed', None)}
elif isinstance(obj, MultiIndex):
return {'typ': 'multi_index',
'klass': obj.__class__.__name__,
'names': getattr(obj, 'names', None),
'dtype': obj.dtype.name,
'data': convert(obj.values),
'compress': compressor}
else:
return {'typ': 'index',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'dtype': obj.dtype.name,
'data': convert(obj.values),
'compress': compressor}
elif isinstance(obj, Categorical):
return {'typ': 'category',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'codes': obj.codes,
'categories': obj.categories,
'ordered': obj.ordered,
'compress': compressor}
elif isinstance(obj, Series):
if isinstance(obj, SparseSeries):
raise NotImplementedError(
'msgpack sparse series is not implemented'
)
# d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
# 'dtype': obj.dtype.name,
# 'index': obj.index,
# 'sp_index': obj.sp_index,
# 'sp_values': convert(obj.sp_values),
# 'compress': compressor}
# for f in ['name', 'fill_value', 'kind']:
# d[f] = getattr(obj, f, None)
# return d
else:
return {'typ': 'series',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
'index': obj.index,
'dtype': obj.dtype.name,
'data': convert(obj.values),
'compress': compressor}
elif issubclass(tobj, NDFrame):
if isinstance(obj, SparseDataFrame):
raise NotImplementedError(
'msgpack sparse frame is not implemented'
)
# d = {'typ': 'sparse_dataframe',
# 'klass': obj.__class__.__name__,
# 'columns': obj.columns}
# for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
# d['data'] = dict([(name, ss)
# for name, ss in obj.items()])
# return d
else:
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
# the block manager
return {'typ': 'block_manager',
'klass': obj.__class__.__name__,
'axes': data.axes,
'blocks': [{'locs': b.mgr_locs.as_array,
'values': convert(b.values),
'shape': b.values.shape,
'dtype': b.dtype.name,
'klass': b.__class__.__name__,
'compress': compressor} for b in data.blocks]
}
elif isinstance(obj, (datetime, date, np.datetime64, timedelta,
np.timedelta64)) or obj is NaT:
if isinstance(obj, Timestamp):
tz = obj.tzinfo
if tz is not None:
tz = tz.zone
freq = obj.freq
if freq is not None:
freq = freq.freqstr
return {'typ': 'timestamp',
'value': obj.value,
'freq': freq,
'tz': tz}
if obj is NaT:
return {'typ': 'nat'}
elif isinstance(obj, np.timedelta64):
return {'typ': 'timedelta64',
'data': obj.view('i8')}
elif isinstance(obj, timedelta):
return {'typ': 'timedelta',
'data': (obj.days, obj.seconds, obj.microseconds)}
elif isinstance(obj, np.datetime64):
return {'typ': 'datetime64',
'data': str(obj)}
elif isinstance(obj, datetime):
return {'typ': 'datetime',
'data': obj.isoformat()}
elif isinstance(obj, date):
return {'typ': 'date',
'data': obj.isoformat()}
raise Exception(
"cannot encode this datetimelike object: {obj}".format(obj=obj))
elif isinstance(obj, Period):
return {'typ': 'period',
'ordinal': obj.ordinal,
'freq': obj.freqstr}
elif isinstance(obj, Interval):
return {'typ': 'interval',
'left': obj.left,
'right': obj.right,
'closed': obj.closed}
elif isinstance(obj, BlockIndex):
return {'typ': 'block_index',
'klass': obj.__class__.__name__,
'blocs': obj.blocs,
'blengths': obj.blengths,
'length': obj.length}
elif isinstance(obj, IntIndex):
return {'typ': 'int_index',
'klass': obj.__class__.__name__,
'indices': obj.indices,
'length': obj.length}
elif isinstance(obj, np.ndarray):
return {'typ': 'ndarray',
'shape': obj.shape,
'ndim': obj.ndim,
'dtype': obj.dtype.name,
'data': convert(obj),
'compress': compressor}
elif isinstance(obj, np.number):
if np.iscomplexobj(obj):
return {'typ': 'np_scalar',
'sub_typ': 'np_complex',
'dtype': obj.dtype.name,
'real': obj.real.__repr__(),
'imag': obj.imag.__repr__()}
else:
return {'typ': 'np_scalar',
'dtype': obj.dtype.name,
'data': obj.__repr__()}
elif isinstance(obj, complex):
return {'typ': 'np_complex',
'real': obj.real.__repr__(),
'imag': obj.imag.__repr__()}
return obj
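# Hedged sketch (not part of the original module): encode() turns pandas and
# numpy objects into plain dicts tagged with 'typ', which decode() below uses
# to pick the reconstruction path.
def _example_encode_timestamp():
    d = encode(Timestamp('2011-01-01', tz='UTC'))
    assert d['typ'] == 'timestamp' and d['tz'] == 'UTC'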
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get('typ')
if typ is None:
return obj
elif typ == 'timestamp':
freq = obj['freq'] if 'freq' in obj else obj['offset']
        return Timestamp(obj['value'], tz=obj['tz'], freq=freq)
import datetime
import logging
import os
import shutil
import geopandas as gpd
import numpy as np
import pandas as pd
import pytz
from berlin_hp import electricity
from demandlib import bdew as bdew
from demandlib import particular_profiles as profiles
from matplotlib import cm
from matplotlib import dates as mdates
from matplotlib import image as mpimg
from matplotlib import patches as patches
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import Normalize
from mpl_toolkits.axes_grid1 import make_axes_locatable
from oemof.tools import logger
from reegis import bmwi
from reegis import coastdat
from reegis import config as cfg
from reegis import demand_elec
from reegis import energy_balance
from reegis import entsoe
from reegis import geometries
from reegis import inhabitants
from reegis import powerplants
from reegis import storages
from scenario_builder import feedin
from reegis_phd import data_analysis
from reegis_phd.figures.figures_base import create_subplot
from reegis_phd.figures.figures_base import show_download_image
def fig_patch_offshore(**kwargs):
ax = create_subplot((12, 4), **kwargs)
federal_states = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("geometry", "federalstates_polygon"),
)
# federal_states.drop(['P0'], inplace=True)
mydf = powerplants.patch_offshore_wind(pd.DataFrame(), [])
mygdf = gpd.GeoDataFrame(mydf)
fs = federal_states.set_index("iso").loc[
["NI", "SH", "HH", "MV", "BB", "BE", "HB", "ST", "NW"]
]
offshore = federal_states.set_index("iso").loc[["N0", "N1", "O0"]]
fs["geometry"] = fs["geometry"].simplify(0.01)
offshore["geometry"] = offshore["geometry"].simplify(0.01)
ax = fs.plot(
ax=ax, facecolor="#badd69", edgecolor="#777777", aspect="equal"
)
ax = offshore.plot(
ax=ax, facecolor="#ffffff", edgecolor="#777777", aspect="equal"
)
mygdf.plot(
markersize=mydf.capacity, alpha=0.5, ax=ax, legend=True, aspect="equal"
)
plt.ylim(bottom=52.5)
ax.set_axis_off()
plt.subplots_adjust(left=0, bottom=0, top=1, right=1)
ax.legend()
return "patch_offshore", None
def fig_powerplants(**kwargs):
plt.rcParams.update({"font.size": 14})
geo = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("geometry", "federalstates_polygon"),
)
my_name = "my_federal_states" # doctest: +SKIP
my_year = 2015 # doctest: +SKIP
pp_reegis = powerplants.get_powerplants_by_region(geo, my_year, my_name)
data_path = os.path.join(os.path.dirname(__file__), "../data", "static")
fn_bnetza = os.path.join(data_path, cfg.get("plot_data", "bnetza"))
pp_bnetza = pd.read_csv(fn_bnetza, index_col=[0], skiprows=2, header=[0])
ax = create_subplot((10, 5), **kwargs)
see = "sonst. erneuerb."
my_dict = {
"Bioenergy": see,
"Geothermal": see,
"Hard coal": "Kohle",
"Hydro": see,
"Lignite": "Kohle",
"Natural gas": "Erdgas",
"Nuclear": "Nuklear",
"Oil": "sonstige fossil",
"Other fossil fuels": "sonstige fossil",
"Other fuels": "sonstige fossil",
"Solar": "Solar",
"Waste": "sonstige fossil",
"Wind": "Wind",
"unknown from conventional": "sonstige fossil",
}
my_dict2 = {
"Biomasse": see,
"Braunkohle": "Kohle",
"Erdgas": "Erdgas",
"Kernenergie": "Nuklear",
"Laufwasser": see,
"Solar": "Solar",
"Sonstige (ne)": "sonstige fossil",
"Steinkohle": "Kohle",
"Wind": "Wind",
"Sonstige (ee)": see,
"Öl": "sonstige fossil",
}
my_colors = [
"#555555",
"#6c3012",
"#db0b0b",
"#ffde32",
"#335a8a",
"#163e16",
"#501209",
]
# pp_reegis.capacity_2015.unstack().to_excel('/home/uwe/shp/wasser.xls')
pp_reegis = (
pp_reegis.capacity_2015.unstack().groupby(my_dict, axis=1).sum()
)
pp_reegis = pp_reegis.merge(
geo["iso"], left_index=True, right_index=True
).set_index("iso")
pp_reegis.loc["AWZ"] = (
pp_reegis.loc["N0"] + pp_reegis.loc["N1"] + pp_reegis.loc["O0"]
)
pp_reegis.drop(["N0", "N1", "O0", "P0"], inplace=True)
pp_bnetza = pp_bnetza.groupby(my_dict2, axis=1).sum()
ax = (
pp_reegis.sort_index()
.sort_index(1)
.div(1000)
.plot(
kind="bar",
stacked=True,
position=1.1,
width=0.3,
legend=False,
color=my_colors,
ax=ax,
)
)
pp_bnetza.sort_index().sort_index(1).div(1000).plot(
kind="bar",
stacked=True,
position=-0.1,
width=0.3,
ax=ax,
color=my_colors,
alpha=0.9,
)
plt.xlabel("Bundesländer / AWZ")
plt.ylabel("Installierte Leistung [GW]")
plt.xlim(left=-0.5)
plt.subplots_adjust(bottom=0.17, top=0.98, left=0.08, right=0.96)
b_sum = pp_bnetza.sum() / 1000
b_total = int(round(b_sum.sum()))
b_ee_sum = int(round(b_sum.loc[["Wind", "Solar", see]].sum()))
b_fs_sum = int(
round(
b_sum.loc[["Erdgas", "Kohle", "Nuklear", "sonstige fossil"]].sum()
)
)
r_sum = pp_reegis.sum() / 1000
r_total = int(round(r_sum.sum()))
r_ee_sum = int(round(r_sum.loc[["Wind", "Solar", see]].sum()))
r_fs_sum = int(
round(
r_sum.loc[["Erdgas", "Kohle", "Nuklear", "sonstige fossil"]].sum()
)
)
text = {
"reegis": (2.3, 42, "reegis"),
"BNetzA": (3.9, 42, "BNetzA"),
"b_sum1": (0, 39, "gesamt"),
"b_sum2": (2.5, 39, "{0} {1}".format(r_total, b_total)),
"b_fs": (0, 36, "fossil"),
"b_fs2": (2.5, 36, " {0} {1}".format(r_fs_sum, b_fs_sum)),
"b_ee": (0, 33, "erneuerbar"),
"b_ee2": (2.5, 33, " {0} {1}".format(r_ee_sum, b_ee_sum)),
}
for t, c in text.items():
plt.text(c[0], c[1], c[2], size=14, ha="left", va="center")
b = patches.Rectangle((-0.2, 31.8), 5.7, 12, color="#cccccc")
ax.add_patch(b)
ax.add_patch(patches.Shadow(b, -0.05, -0.2))
return "vergleich_kraftwerke_reegis_bnetza", None
def fig_storage_capacity(**kwargs):
plt.rcParams.update({"font.size": 12})
ax = create_subplot((6, 4), **kwargs)
federal_states = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("geometry", "federalstates_polygon"),
)
federal_states.set_index("iso", drop=True, inplace=True)
federal_states["geometry"] = federal_states["geometry"].simplify(0.02)
phes = storages.pumped_hydroelectric_storage_by_region(
federal_states, 2014, "federal_states"
)
fs = federal_states.merge(
phes, left_index=True, right_index=True, how="left"
).fillna(0)
fs.drop(["N0", "N1", "O0", "P0"], inplace=True)
fs["energy"] = fs["energy"].div(1000)
# colormap = "YlGn"
colormap = "Greys"
ax = fs.plot(column="energy", cmap=colormap, ax=ax, aspect="equal")
ax = fs.boundary.plot(ax=ax, color="#777777", aspect="equal")
coords = {
"NI": (9.7, 52.59423440995961),
"SH": (9.8, 53.9),
"ST": (11.559203329244966, 51.99003282648907),
"NW": (7.580292138948966, 51.4262307721131),
"BW": (9.073099768325736, 48.5),
"BY": (11.5, 48.91810114600406),
"TH": (10.9, 50.8),
"HE": (9.018890328297207, 50.52634809768823),
"SN": (13.3, 50.928277090542124),
}
for idx, row in fs.iterrows():
if row["energy"] > 0:
if row["energy"] > 10:
color = "#dddddd"
else:
color = "#000000"
plt.annotate(
s=round(row["energy"], 1),
xy=coords[idx],
horizontalalignment="center",
color=color,
)
ax.set_axis_off()
scatter = ax.collections[0]
cbar = plt.colorbar(scatter, ax=ax)
cbar.set_label("Speicherkapazität [GWh]", rotation=270, labelpad=15)
plt.subplots_adjust(left=0, bottom=0.05, top=0.95)
return "storage_capacity_by_federal_states", None
def fig_inhabitants():
plt.rcParams.update({"font.size": 18})
f, ax_ar = plt.subplots(1, 2, figsize=(16, 5.6))
df = pd.DataFrame()
for year in range(2011, 2018):
df[year] = inhabitants.get_ew_by_federal_states(year)
df.sort_values(2017, inplace=True)
df.transpose().div(1000).plot(
kind="bar", stacked=True, cmap="tab20b_r", ax=ax_ar[0]
)
print(df)
handles, labels = ax_ar[0].get_legend_handles_labels()
ax_ar[0].legend(
handles[::-1],
labels[::-1],
loc="upper left",
bbox_to_anchor=(1, 1.025),
)
# plt.subplots_adjust(left=0.14, bottom=0.15, top=0.9, right=0.8)
ax_ar[0].set_ylabel("Tsd. Einwohner")
ax_ar[0].set_xticklabels(ax_ar[0].get_xticklabels(), rotation=0)
plt.xticks(rotation=0)
ew = inhabitants.get_ew_geometry(2017, polygon=True)
ew["ew_area"] = ew["EWZ"].div(ew["KFL"]).fillna(0)
ew["geometry"] = ew["geometry"].simplify(0.01)
ew.plot(
column="ew_area", vmax=800, cmap="cividis", ax=ax_ar[1], aspect="equal"
)
ax_ar[1].set_axis_off()
divider = make_axes_locatable(ax_ar[1])
cax = divider.append_axes("right", size="5%", pad=0.2)
norm = Normalize(vmin=0, vmax=800)
n_cmap = cm.ScalarMappable(norm=norm, cmap="cividis")
n_cmap.set_array(np.array([]))
cbar = plt.colorbar(n_cmap, ax=ax_ar[1], extend="max", cax=cax)
cbar.set_label("Einwohner pro km²", rotation=270, labelpad=30)
plt.subplots_adjust(left=0.09, top=0.98, bottom=0.06, right=0.93)
# plt.xticks(rotation=0)
return "inhabitants_by_ferderal_states", None
def fig_average_weather():
plt.rcParams.update({"font.size": 20})
f, ax_ar = plt.subplots(1, 2, figsize=(14, 4.5), sharey=True)
my_cmap = LinearSegmentedColormap.from_list(
"mycmap",
[
(0, "#dddddd"),
(1 / 7, "#c946e5"),
(2 / 7, "#ffeb00"),
(3 / 7, "#26a926"),
(4 / 7, "#c15c00"),
(5 / 7, "#06ffff"),
(6 / 7, "#f24141"),
(7 / 7, "#1a2663"),
],
)
weather_path = cfg.get("paths", "coastdat")
# Download missing weather files
pattern = "coastDat2_de_{0}.h5"
for year in range(1998, 2015):
fn = os.path.join(weather_path, pattern.format(year))
if not os.path.isfile(fn):
coastdat.download_coastdat_data(filename=fn, year=year)
pattern = "average_data_{data_type}.csv"
dtype = "v_wind"
fn = os.path.join(weather_path, pattern.format(data_type=dtype))
if not os.path.isfile(fn):
coastdat.store_average_weather(dtype, out_file_pattern=pattern)
df = pd.read_csv(fn, index_col=[0])
coastdat_poly = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
coastdat_poly = coastdat_poly.merge(df, left_index=True, right_index=True)
ax = coastdat_poly.plot(
column="v_wind_avg",
cmap=my_cmap,
vmin=1,
vmax=8,
ax=ax_ar[0],
aspect="equal",
)
ax = (
geometries.get_germany_with_awz_polygon()
.simplify(0.05)
.boundary.plot(ax=ax, color="#555555", aspect="equal")
)
ax.set_axis_off()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
norm = Normalize(vmin=1, vmax=8)
n_cmap = cm.ScalarMappable(norm=norm, cmap=my_cmap)
n_cmap.set_array(np.array([]))
cbar = plt.colorbar(n_cmap, ax=ax, extend="both", cax=cax)
cbar.set_label("Windgeschwindigkeit [m/s]", rotation=270, labelpad=30)
weather_path = cfg.get("paths", "coastdat")
dtype = "temp_air"
fn = os.path.join(weather_path, pattern.format(data_type=dtype))
if not os.path.isfile(fn):
coastdat.store_average_weather(
dtype, out_file_pattern=pattern, years=[2014, 2013, 2012]
)
df = pd.read_csv(fn, index_col=[0]) - 273.15
print(df.mean())
coastdat_poly = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
coastdat_poly = coastdat_poly.merge(df, left_index=True, right_index=True)
ax = coastdat_poly.plot(
column="temp_air_avg",
cmap="rainbow",
vmin=7,
vmax=11,
ax=ax_ar[1],
aspect="equal",
)
ax = (
geometries.get_germany_with_awz_polygon()
.simplify(0.05)
.boundary.plot(ax=ax, color="#555555", aspect="equal")
)
ax.set_axis_off()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
norm = Normalize(vmin=5, vmax=11)
n_cmap = cm.ScalarMappable(norm=norm, cmap="rainbow")
n_cmap.set_array(np.array([]))
cbar = plt.colorbar(n_cmap, ax=ax, extend="both", cax=cax)
cbar.set_label("Temperatur [°C]", rotation=270, labelpad=30)
plt.subplots_adjust(left=0, top=0.97, bottom=0.03, right=0.93, wspace=0.1)
return "average_weather", None
def fig_strahlungsmittel():
return show_download_image("strahlungsmittel_dwd_coastdat", ["svg"])
def fig_module_comparison():
plt.rcParams.update({"font.size": 15})
plt.sca(create_subplot((10.7, 5)))
df = pd.read_csv(
os.path.join(cfg.get("paths", "data_my_reegis"), "module_feedin.csv"),
index_col=0,
)["dc_norm"]
print(df)
print(df.sort_values())
# df = df[df > 943]
df.sort_values().plot(linewidth=5, ylim=(0, df.max() + 20))
print("avg:", df.mean())
print("std div:", df.std())
plt.plot((0, len(df)), (df.mean(), df.mean()), "k-")
plt.plot((0, len(df)), (df.mean() - df.std(), df.mean() - df.std()), "k-.")
plt.plot((0, len(df)), (df.mean() + df.std(), df.mean() + df.std()), "k-.")
plt.plot((253, 253), (0, df.max() + 20), "k-")
plt.plot((479, 479), (0, df.max() + 20), "r-")
plt.plot((394, 394), (0, df.max() + 20), "r-")
plt.plot((253, 253), (0, df.max() + 20), "r-")
plt.plot((62, 62), (0, df.max() + 20), "r-")
plt.text(
479,
800,
"SF 160S",
ha="center",
bbox={"facecolor": "white", "alpha": 1, "pad": 5, "linewidth": 0},
)
plt.text(
394,
800,
"LG290N1C",
ha="center",
bbox={"facecolor": "white", "alpha": 1, "pad": 5, "linewidth": 0},
)
plt.text(
253,
800,
"STP280S",
ha="center",
bbox={"facecolor": "white", "alpha": 1, "pad": 5, "linewidth": 0},
)
plt.text(
62,
800,
"BP2150S",
ha="center",
bbox={"facecolor": "white", "alpha": 1, "pad": 5, "linewidth": 0},
)
plt.xticks(np.arange(0, len(df), 40), range(0, len(df), 40))
plt.ylim(500, 1400)
plt.xlim(0, len(df))
plt.ylabel("Volllaststunden")
plt.xlabel("ID des Moduls")
plt.subplots_adjust(right=0.98, left=0.09, bottom=0.12, top=0.95)
return "module_comparison", None
def fig_analyse_multi_files():
plt.rcParams.update({"font.size": 10})
path = os.path.join(cfg.get("paths", "data_my_reegis"))
fn = os.path.join(path, "multiyear_yield_sum.csv")
df = pd.read_csv(fn, index_col=[0, 1])
gdf = data_analysis.get_coastdat_onshore_polygons()
gdf.geometry = gdf.buffer(0.005)
for key in gdf.index:
s = df[str(key)]
pt = gdf.loc[key]
gdf.loc[key, "tilt"] = s[s == s.max()].index.get_level_values("tilt")[
0
]
gdf.loc[key, "azimuth"] = s[s == s.max()].index.get_level_values(
"azimuth"
)[0]
gdf.loc[key, "longitude"] = pt.geometry.centroid.x
gdf.loc[key, "latitude"] = pt.geometry.centroid.y
gdf.loc[key, "tilt_calc"] = round(pt.geometry.centroid.y - 15)
gdf.loc[key, "tilt_diff"] = abs(
gdf.loc[key, "tilt_calc"] - gdf.loc[key, "tilt"]
)
gdf.loc[key, "tilt_diff_c"] = abs(gdf.loc[key, "tilt"] - 36.5)
gdf.loc[key, "azimuth_diff"] = abs(gdf.loc[key, "azimuth"] - 178.5)
cmap_t = plt.get_cmap("viridis", 8)
cmap_az = plt.get_cmap("viridis", 7)
cm_gyr = LinearSegmentedColormap.from_list(
"mycmap", [(0, "green"), (0.5, "yellow"), (1, "red")], 6
)
f, ax_ar = plt.subplots(3, 2, sharey=True, sharex=True, figsize=(7, 8))
ax_ar[0][0].set_title("Azimuth (optimal)", loc="center", y=1)
gdf.plot(
"azimuth",
legend=True,
cmap=cmap_az,
vmin=173,
vmax=187,
ax=ax_ar[0][0],
aspect="equal",
)
ax_ar[1][0].set_title("Neigung (optimal)", loc="center", y=1)
gdf.plot(
"tilt",
legend=True,
vmin=32.5,
vmax=40.5,
cmap=cmap_t,
ax=ax_ar[1][0],
aspect="equal",
)
ax_ar[2][0].set_title("Neigung (nach Breitengrad)", loc="center", y=1)
gdf.plot(
"tilt_calc",
legend=True,
vmin=32.5,
vmax=40.5,
cmap=cmap_t,
ax=ax_ar[2][0],
aspect="equal",
)
ax_ar[0][1].set_title(
"Azimuth (Differenz - optimal zu 180°)", loc="center", y=1,
)
gdf.plot(
"azimuth_diff",
legend=True,
vmin=-0.5,
vmax=5.5,
cmap=cm_gyr,
ax=ax_ar[0][1],
aspect="equal",
)
ax_ar[1][1].set_title(
"Neigung (Differenz - optimal zu Breitengrad)", loc="center", y=1
)
gdf.plot(
"tilt_diff",
legend=True,
vmin=-0.5,
vmax=5.5,
cmap=cm_gyr,
ax=ax_ar[1][1],
aspect="equal",
)
ax_ar[2][1].set_title(
"Neigung (Differenz - optimal zu 36,5°)", loc="center", y=1
)
gdf.plot(
"tilt_diff_c",
legend=True,
vmin=-0.5,
vmax=5.5,
cmap=cm_gyr,
ax=ax_ar[2][1],
aspect="equal",
)
plt.subplots_adjust(right=1, left=0.05, bottom=0.05, top=0.95, wspace=0.11)
return "analyse_optimal_orientation", None
def fig_polar_plot_pv_orientation():
plt.rcParams.update({"font.size": 14})
key = 1129089
path = os.path.join(cfg.get("paths", "data_my_reegis"))
fn = os.path.join(path, "{0}_combined_c.csv".format(key))
df = pd.read_csv(fn, index_col=[0, 1])
df.reset_index(inplace=True)
df["rel"] = df["2"] / df["2"].max()
azimuth_opt = float(df[df["2"] == df["2"].max()]["1"])
tilt_opt = float(df[df["2"] == df["2"].max()]["0"])
print(azimuth_opt, tilt_opt)
print(tilt_opt - 5)
print(df[(df["1"] == azimuth_opt + 5) & (df["0"] == tilt_opt + 5)])
print(df[(df["1"] == azimuth_opt - 5) & (df["0"] == tilt_opt + 5)])
print(
df[(df["1"] == azimuth_opt + 5) & (df["0"] == round(tilt_opt - 5, 1))]
)
print(
df[(df["1"] == azimuth_opt - 5) & (df["0"] == round(tilt_opt - 5, 1))]
)
# Data
tilt = df["0"]
azimuth = df["1"] / 180 * np.pi
colors = df["2"] / df["2"].max()
# Colormap
cmap = plt.get_cmap("viridis", 20)
# Plot
fig = plt.figure(figsize=(9, 4))
ax = fig.add_subplot(111, projection="polar")
sc = ax.scatter(azimuth, tilt, c=colors, cmap=cmap, alpha=1, vmin=0.8)
ax.tick_params(pad=10)
# Colorbar
label = "Anteil vom maximalen Ertrag"
cax = fig.add_axes([0.89, 0.15, 0.02, 0.75])
fig.colorbar(sc, cax=cax, label=label, ticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
ax.set_theta_zero_location("S", offset=0)
# Adjust radius
# ax.set_rmax(90)
ax.set_rlabel_position(110)
t_upper = tilt_opt + 5
t_lower = tilt_opt - 5
az_upper = azimuth_opt + 5
az_lower = azimuth_opt - 5
bbox_props = dict(boxstyle="round", fc="white", alpha=0.5, lw=0)
ax.annotate(
">0.996",
xy=((az_upper - 5) / 180 * np.pi, t_upper),
xytext=((az_upper + 3) / 180 * np.pi, t_upper + 3),
# textcoords='figure fraction',
arrowprops=dict(facecolor="black", arrowstyle="-"),
horizontalalignment="left",
verticalalignment="bottom",
bbox=bbox_props,
)
print(az_upper)
print(t_upper)
ax.text(
238 / 180 * np.pi,
60,
"Ausrichtung (Süd=180°)",
rotation=50,
horizontalalignment="center",
verticalalignment="center",
)
ax.text(
65 / 180 * np.pi,
35,
"Neigungswinkel (horizontal=0°)",
rotation=0,
horizontalalignment="center",
verticalalignment="center",
)
az = (
np.array([az_lower, az_lower, az_upper, az_upper, az_lower])
/ 180
* np.pi
)
t = np.array([t_lower, t_upper, t_upper, t_lower, t_lower])
ax.plot(az, t)
ax.set_rmax(50)
ax.set_rmin(20)
ax.set_thetamin(90)
ax.set_thetamax(270)
# Adjust margins
plt.subplots_adjust(right=0.94, left=0, bottom=-0.15, top=1.2)
return "polar_plot_pv_orientation.png", None
def fig_windzones():
# ax.set_axis_off()
plt.show()
path = cfg.get("paths", "geometry")
filename = "windzones_germany.geojson"
df = geometries.load(path=path, filename=filename)
df.set_index("zone", inplace=True)
geo_path = cfg.get("paths", "geometry")
geo_file = cfg.get("coastdat", "coastdatgrid_polygon")
coastdat_geo = geometries.load(path=geo_path, filename=geo_file)
coastdat_geo["poly"] = coastdat_geo.geometry
coastdat_geo["geometry"] = coastdat_geo.centroid
points = geometries.spatial_join_with_buffer(coastdat_geo, df, "windzone")
polygons = points.set_geometry("poly")
cmap_bluish = LinearSegmentedColormap.from_list(
"bluish", [(0, "#8fbbd2"), (1, "#00317a")], 4
)
ax = polygons.plot(
column="windzone",
edgecolor="#666666",
linewidth=0.5,
cmap=cmap_bluish,
vmin=0.5,
vmax=4.5,
aspect="equal",
)
ax.set_axis_off()
df.boundary.simplify(0.01).plot(
edgecolor="black", alpha=1, ax=ax, linewidth=1.5, aspect="equal",
)
text = {"1": (9, 50), "2": (12, 52), "3": (9.8, 54), "4": (6.5, 54.6)}
for t, c in text.items():
plt.text(
c[0],
c[1],
t,
size=15,
ha="center",
va="center",
bbox=dict(boxstyle="round", alpha=0.5, ec=(1, 1, 1), fc=(1, 1, 1)),
)
plt.subplots_adjust(left=0, top=1, bottom=0, right=1)
return "windzones", None
def fig_show_hydro_image():
create_subplot((12, 4.4))
file = "abflussregime.png"
fn = os.path.join(cfg.get("paths", "figure_source"), file)
fn_target = os.path.join(cfg.get("paths", "figures"), file)
shutil.copy(fn, fn_target)
img = mpimg.imread(fn)
plt.imshow(img)
plt.axis("off")
plt.title(
"Image source: https://www.iksr.org/fileadmin/user_upload/DKDM/"
"Dokumente/Fachberichte/DE/rp_De_0248.pdf; S.16"
)
plt.subplots_adjust(left=0, top=0.93, bottom=0, right=1)
return "abflussregime", None
def fig_compare_re_capacity_years():
# from reegis import bmwi
plt.rcParams.update({"font.size": 18})
f, ax_ar = plt.subplots(1, 2, sharey=True, sharex=True, figsize=(15, 5))
years = [2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019]
my_bmwi = bmwi.bmwi_re_energy_capacity().loc[years].div(1000)
my_bmwi.set_index(
pd.to_datetime(my_bmwi.index.astype(str) + "-12-31")
+ pd.DateOffset(1),
inplace=True,
)
bmwi_solar = my_bmwi["solar", "capacity"]
bmwi_solar.name = "Solar (BMWI)"
bmwi_wind = my_bmwi["wind", "capacity"]
bmwi_wind.name = "Wind (BMWI)"
ax_ar[0] = bmwi_solar.plot(
marker="D",
ax=ax_ar[0],
linestyle="None",
markersize=10,
color="#ff5500",
alpha=0.7,
legend=True,
)
ax_ar[1] = bmwi_wind.plot(
marker="D",
ax=ax_ar[1],
linestyle="None",
markersize=10,
color="#111539",
alpha=0.7,
legend=True,
)
my_re = entsoe.get_entsoe_renewable_data(version="2019-06-05").div(1000)
my_re = my_re.resample("D").mean()
print(my_re.index)
rn = {
"DE_solar_capacity": "Solar (OPSD)",
"DE_wind_capacity": "Wind (OPSD)",
}
my_re.rename(columns=rn, inplace=True)
ax_ar[0] = my_re["Solar (OPSD)"].plot(
ax=ax_ar[0], color="#ffba00", legend=True
)
ax_ar[1] = my_re["Wind (OPSD)"].plot(
ax=ax_ar[1], color="#4254ff", legend=True
)
fs = geometries.get_federal_states_polygon()
df = pd.DataFrame()
for y in years:
my_pp = powerplants.get_powerplants_by_region(fs, y, "federal_states")
for cat in ["Solar", "Wind"]:
dt = datetime.datetime(y, 1, 1)
cat_name = "{0} (reegis)".format(cat)
col = "capacity_{0}".format(y)
df.loc[dt, cat_name] = my_pp.groupby(level=1).sum().loc[cat, col]
df = df.div(1000)
ax_ar[0] = df["Solar (reegis)"].plot(
drawstyle="steps-post", ax=ax_ar[0], color="#ff7000", legend=True
)
ax_ar[1] = df["Wind (reegis)"].plot(
drawstyle="steps-post", ax=ax_ar[1], color=["#1b2053"], legend=True
)
ax_ar[0].set_xlim(
left=datetime.datetime(2012, 1, 1), right=datetime.datetime(2018, 1, 1)
)
plt.ylim((25, 60))
ax_ar[0].set_ylabel("Installierte Leistung [GW]")
ax_ar[0].set_xlabel(" ")
ax_ar[1].set_xlabel(" ")
ax_ar[0].legend(loc="upper left")
ax_ar[1].legend(loc="upper left")
plt.subplots_adjust(
right=0.98, left=0.06, bottom=0.11, top=0.94, wspace=0.16
)
return "compare_re_capacity_years", None
def fig_compare_full_load_hours():
plt.rcParams.update({"font.size": 18})
f, ax_ar = plt.subplots(2, 2, sharex=True, figsize=(15, 7))
# # colors greyscale
# wind1 = "#999999"
# wind2 = "#333333"
# solar1 = "#999999"
# solar2 = "#333333"
# colors
wind1 = "#4254ff"
wind2 = "#1b2053"
solar1 = "#ffba00"
solar2 = "#ff7000"
fn = os.path.join(
cfg.get("paths", "data_my_reegis"),
"full_load_hours_re_bdew_states.csv",
)
flh = pd.read_csv(fn, index_col=[0], header=[0, 1])
regions = geometries.get_federal_states_polygon()
for y in [2014, 2012]:
re_rg = feedin.scenario_feedin(regions, y, "fs").swaplevel(axis=1)
flh["Wind (reegis)", str(y)] = re_rg["wind"].sum()
flh["Solar (reegis)", str(y)] = re_rg["solar"].sum()
ax_ar[0, 0] = flh[
[("Wind (BDEW)", "2012"), ("Wind (reegis)", "2012")]
].plot(kind="bar", ax=ax_ar[0, 0], color=[wind1, wind2], legend=False)
ax_ar[0, 1] = flh[
[("Wind (BDEW)", "2014"), ("Wind (reegis)", "2014")]
].plot(kind="bar", ax=ax_ar[0, 1], color=[wind1, wind2], legend=False)
ax_ar[1, 0] = flh[
[("Solar (BDEW)", "2012"), ("Solar (reegis)", "2012")]
].plot(kind="bar", ax=ax_ar[1, 0], color=[solar1, solar2], legend=False)
ax_ar[1, 1] = flh[
[("Solar (BDEW)", "2014"), ("Solar (reegis)", "2014")]
].plot(kind="bar", ax=ax_ar[1, 1], color=[solar1, solar2], legend=False)
ax_ar[0, 0].set_title("2012")
ax_ar[0, 1].set_title("2014")
ax_ar[0, 1].legend(
loc="upper left", bbox_to_anchor=(1, 1), labels=["BDEW", "reegis"]
)
ax_ar[1, 1].legend(
loc="upper left", bbox_to_anchor=(1, 1), labels=["BDEW", "reegis"]
)
ax_ar[0, 0].set_ylabel("Volllaststunden\nWindkraft")
ax_ar[1, 0].set_ylabel("Volllaststunden\nPhotovoltaik")
plt.subplots_adjust(
right=0.871, left=0.098, bottom=0.11, top=0.94, wspace=0.16, hspace=0.1
)
return "compare_full_load_hours", None
def fig_compare_feedin_solar():
plt.rcParams.update({"font.size": 18})
f, ax_ar = plt.subplots(2, 1, sharey=True, figsize=(15, 6))
# Get feedin time series from reegis
regions = geometries.get_federal_states_polygon()
re_rg = feedin.scenario_feedin(regions, 2014, "fs").set_index(
pd.date_range(
"31/12/2013 23:00:00", periods=8760, freq="H", tz="Europe/Berlin"
)
)
# Get entsoe time series for pv profiles from opsd
url = (
"https://data.open-power-system-data.org/index.php?package"
"=time_series&version={version}&action=customDownload&resource=3"
"&filter%5B_contentfilter_cet_cest_timestamp%5D%5Bfrom%5D=2005-01"
"-01&filter%5B_contentfilter_cet_cest_timestamp%5D%5Bto%5D=2019-05"
"-01&filter%5BRegion%5D%5B%5D=DE&filter%5BVariable%5D%5B%5D"
"=solar_capacity&filter%5BVariable%5D%5B%5D=solar_generation_actual"
"&filter%5BVariable%5D%5B%5D=solar_profile&downloadCSV=Download+CSV"
)
my_re = entsoe.get_filtered_file(
url=url, name="solar_de_2019-06-05", version="2019-06-05"
)
# Convert index to datetime
my_re.set_index(
pd.to_datetime(my_re["utc_timestamp"], utc=True).dt.tz_convert(
"Europe/Berlin"
),
inplace=True,
)
my_re.drop(["cet_cest_timestamp", "utc_timestamp"], axis=1, inplace=True)
# Convert columns to numeric
for c in my_re.columns:
my_re[c] = pd.to_numeric(my_re[c]).div(1000)
# Plot opsd data
cso = "#ff7e00"
csr = "#500000"
ax = my_re["DE_solar_profile"].multiply(1000).plot(ax=ax_ar[0], color=cso)
ax2 = my_re["DE_solar_profile"].multiply(1000).plot(ax=ax_ar[1], color=cso)
fs = geometries.get_federal_states_polygon()
pp = powerplants.get_powerplants_by_region(fs, 2014, "federal_states")
total_capacity = pp.capacity_2014.swaplevel().loc["Solar"].sum()
re_rg = re_rg.swaplevel(axis=1)["solar"].mul(
pp.capacity_2014.swaplevel().loc["Solar"]
)
# Plot reegis time series
# June
ax = (
re_rg.sum(axis=1)
.div(total_capacity)
.plot(
ax=ax,
rot=0,
color=csr,
xlim=(
datetime.datetime(2014, 6, 1),
datetime.datetime(2014, 6, 30),
),
)
)
# December
ax2 = (
re_rg.sum(axis=1)
.div(total_capacity)
.plot(
ax=ax2,
rot=0,
color=csr,
xlim=(
datetime.datetime(2014, 12, 1),
datetime.datetime(2014, 12, 30),
),
)
)
# x-ticks for June
dates = [
datetime.datetime(2014, 6, 1),
datetime.datetime(2014, 6, 5),
datetime.datetime(2014, 6, 9),
datetime.datetime(2014, 6, 13),
datetime.datetime(2014, 6, 17),
datetime.datetime(2014, 6, 21),
datetime.datetime(2014, 6, 25),
datetime.datetime(2014, 6, 29),
]
ax.set_xticks([pandas_datetime for pandas_datetime in dates])
labels = [pandas_datetime.strftime("%d. %b.") for pandas_datetime in dates]
labels[0] = ""
ax.set_xticklabels(labels, ha="center", rotation=0)
# xticks for December
dates = [
datetime.datetime(2014, 12, 1),
datetime.datetime(2014, 12, 5),
datetime.datetime(2014, 12, 9),
datetime.datetime(2014, 12, 13),
datetime.datetime(2014, 12, 17),
datetime.datetime(2014, 12, 21),
datetime.datetime(2014, 12, 25),
datetime.datetime(2014, 12, 29),
]
ax2.set_xticks([pandas_datetime for pandas_datetime in dates])
labels = [pandas_datetime.strftime("%d. %b.") for pandas_datetime in dates]
labels[0] = ""
ax2.set_xticklabels(labels, ha="center", rotation=0)
ax.legend(labels=["OPSD", "reegis"])
ax.set_xlabel("")
ax.set_ylim((0, 1.1))
ax2.set_xlabel("Juni/Dezember 2014")
ax2.xaxis.labelpad = 20
# Plot Text
x0 = datetime.datetime(2014, 12, 1, 5, 0)
x1 = datetime.datetime(2014, 12, 1, 8, 0)
x2 = datetime.datetime(2014, 12, 3, 1, 0)
start = datetime.datetime(2014, 1, 1)
end = datetime.datetime(2015, 1, 1)
# BMWI
# https://www.bmwi.de/Redaktion/DE/Publikationen/Energie/
# erneuerbare-energien-in-zahlen-2017.pdf?__blob=publicationFile&v=27
bmwi_sum = round(36.056)
reegis_sum = round(re_rg.sum().sum() / 1000000)
opsd_sum = round(
my_re.DE_solar_generation_actual.loc[start:end].sum() / 1000
)
text = {
"title": (x1, 1, " Summe 2014"),
"op1": (x1, 0.85, "OPSD"),
"op2": (x2, 0.85, "{0} GWh".format(int(opsd_sum))),
"reg1": (x1, 0.70, "reegis"),
"reg2": (x2, 0.70, "{0} GWh".format(int(reegis_sum))),
"bmwi1": (x1, 0.55, "BMWi"),
"bmwi2": (x2, 0.55, "{0} GWh".format(int(bmwi_sum))),
}
for t, c in text.items():
if t == "title":
w = "bold"
else:
w = "normal"
ax2.text(c[0], c[1], c[2], weight=w, size=16, ha="left", va="center")
# Plot Box
x3 = mdates.date2num(x0)
b = patches.Rectangle((x3, 0.5), 3.9, 0.57, color="#cccccc")
ax2.add_patch(b)
ax2.add_patch(patches.Shadow(b, -0.05, -0.01))
plt.subplots_adjust(right=0.99, left=0.05, bottom=0.16, top=0.97)
return "compare_feedin_solar", None
def fig_compare_feedin_wind_absolute():
fig_compare_feedin_wind(scale_reegis=False)
return "compare_feedin_wind_absolute", None
def fig_compare_feedin_wind_scaled():
fig_compare_feedin_wind(scale_reegis=True)
return "compare_feedin_wind_scaled", None
def fig_compare_feedin_wind(scale_reegis):
plt.rcParams.update({"font.size": 18})
f, ax_ar = plt.subplots(2, 1, sharey=True, figsize=(15, 6))
# colors
cwo = "#665eff"
cwr = "#0a085e"
# Get entsoe time series for wind profiles from opsd
url = (
"https://data.open-power-system-data.org/index.php?package"
"=time_series&version={version}&action=customDownload&resource=3"
"&filter%5B_contentfilter_cet_cest_timestamp%5D%5Bfrom%5D=2005-01"
"-01&filter%5B_contentfilter_cet_cest_timestamp%5D%5Bto%5D=2019-05"
"-01&filter%5BRegion%5D%5B%5D=DE&filter%5BVariable%5D%5B%5D"
"=wind_capacity&filter%5BVariable%5D%5B%5D=wind_generation_actual"
"&filter%5BVariable%5D%5B%5D=wind_profile&downloadCSV=Download+CSV"
)
re_en = entsoe.get_filtered_file(
url=url, name="wind_de_2019-06-05", version="2019-06-05"
)
# Convert index to datetime
re_en.set_index(
| pd.to_datetime(re_en["utc_timestamp"], utc=True) | pandas.to_datetime |
"""
The MIT License (MIT)
Copyright (c) 2016 <NAME>
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import tempfile
import gym
import numpy as np
import pandas as pd
from gym import spaces
from gym.utils import seeding
from sklearn.preprocessing import scale
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.info('%s logger started.', __name__)
class DataSource:
"""
Data source for TradingEnvironment
Loads & preprocesses daily price & volume data
Provides data for each new episode.
Stocks with longest history:
ticker # obs
KO 14155
GE 14155
BA 14155
CAT 14155
DIS 14155
"""
def __init__(self, trading_days=252, ticker='AAPL', normalize=True, min_perc_days=100):
self.ticker = ticker
self.trading_days = trading_days + 1
self.normalize = normalize
self.min_perc_days = min_perc_days
self.data = self.load_data()
self.preprocess_data()
self.min_values = self.data.min()
self.max_values = self.data.max()
self.step = 0
self.idx = None
def load_data(self):
log.info('loading data for {}...'.format(self.ticker))
idx = pd.IndexSlice
with pd.HDFStore('../data/assets.h5') as store:
df = (store['quandl/wiki/prices']
.loc[idx[:, self.ticker],
['adj_close', 'adj_volume']]
.dropna())
df.columns = ['close', 'volume']
log.info('got data for {}...'.format(self.ticker))
return df
@staticmethod
def rsi(data, window=14):
diff = data.diff().dropna()
up, down = diff.copy(), diff.copy()
up[up < 0] = 0
down[down > 0] = 0
rolling_up = up.rolling(window).mean()
rolling_down = down.abs().rolling(window).mean()
RS2 = rolling_up / rolling_down
return 100 - (100 / (1 + RS2))
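    # Quick sanity check for the RSI above (illustrative numbers, not from the data):
    # with a 14-day average gain of 1.0 and average loss of 0.5, RS = 1.0 / 0.5 = 2,
    # so RSI = 100 - 100 / (1 + 2) = 66.67. Note this variant uses simple rolling
    # means rather than Wilder's exponential smoothing.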
def momentum(self, data, window=100):
def pct_rank(x):
return pd.Series(x).rank(pct=True).iloc[-1]
return data.rolling(window).apply(pct_rank, raw=True)
def preprocess_data(self):
"""calculate returns and percentiles, then removes missing values"""
# make volume positive and pre-scale
self.data.volume = np.log(self.data.volume.replace(0, 1))
self.data['returns'] = self.data.close.pct_change()
self.data['close_pct_100'] = self.momentum(self.data.close, window=100)
self.data['volume_pct_100'] = self.momentum(self.data.volume, window=100)
self.data['close_pct_20'] = self.momentum(self.data.close, window=20)
self.data['volume_pct_20'] = self.momentum(self.data.volume, window=20)
self.data['return_5'] = self.data.returns.pct_change(5)
self.data['return_21'] = self.data.returns.pct_change(21)
self.data['rsi'] = self.rsi(self.data.close)
self.data = self.data.replace((np.inf, -np.inf), np.nan).dropna()
r = self.data.returns.copy()
if self.normalize:
self.data = pd.DataFrame(scale(self.data),
columns=self.data.columns,
index=self.data.index)
self.data['returns'] = r # don't scale returns
log.info(self.data.info())
def reset(self):
"""Provides starting index for time series and resets step"""
high = len(self.data.index) - self.trading_days
self.idx = np.random.randint(low=0, high=high)
self.step = 0
def take_step(self):
"""Returns data for current trading day and done signal"""
obs = self.data.iloc[self.idx].values
self.idx += 1
self.step += 1
done = self.step >= self.trading_days
return obs, done
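# Illustrative sketch (not part of the original module) of how DataSource is driven
# by the environment below; it assumes the HDF store referenced in load_data()
# exists locally and contains the 'quandl/wiki/prices' table.
def _demo_data_source(ticker='AAPL', trading_days=252):
    src = DataSource(trading_days=trading_days, ticker=ticker)
    src.reset()                      # pick a random episode start
    done = False
    n_steps = 0
    while not done:
        obs, done = src.take_step()  # one row of scaled features per trading day
        n_steps += 1
    return n_steps                   # equals trading_days + 1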
class TradingSimulator:
    """ Implements core trading simulator for a single-instrument universe """
def __init__(self, steps, trading_cost_bps, time_cost_bps):
# invariant for object life
self.trading_cost_bps = trading_cost_bps
self.time_cost_bps = time_cost_bps
self.steps = steps
# change every step
self.step = 0
self.actions = np.zeros(self.steps)
self.navs = np.ones(self.steps)
self.market_navs = np.ones(self.steps)
self.strategy_returns = np.ones(self.steps)
self.positions = np.zeros(self.steps)
self.costs = np.zeros(self.steps)
self.trades = np.zeros(self.steps)
self.market_returns = np.zeros(self.steps)
def reset(self):
self.step = 0
self.actions.fill(0)
self.navs.fill(1)
self.market_navs.fill(1)
self.strategy_returns.fill(0)
self.positions.fill(0)
self.costs.fill(0)
self.trades.fill(0)
self.market_returns.fill(0)
def take_step(self, action, market_return):
""" Calculates NAVs, trading costs and reward
based on an action and latest market return
        and returns the reward and a summary of the day's activity. """
bod_position = 0.0 if self.step == 0 else self.positions[self.step - 1]
bod_nav = 1.0 if self.step == 0 else self.navs[self.step - 1]
bod_market_nav = 1.0 if self.step == 0 else self.market_navs[self.step - 1]
self.market_returns[self.step] = market_return
self.actions[self.step] = action
self.positions[self.step] = action - 1
self.trades[self.step] = self.positions[self.step] - bod_position
trade_costs_pct = abs(self.trades[self.step]) * self.trading_cost_bps
self.costs[self.step] = trade_costs_pct + self.time_cost_bps
reward = ((bod_position * market_return) - self.costs[self.step])
self.strategy_returns[self.step] = reward
if self.step != 0:
self.navs[self.step] = bod_nav * (1 + self.strategy_returns[self.step - 1])
self.market_navs[self.step] = bod_market_nav * (1 + self.market_returns[self.step - 1])
info = {'reward': reward,
'nav' : self.navs[self.step],
'costs' : self.costs[self.step]}
self.step += 1
return reward, info
def result(self):
"""returns current state as pd.DataFrame """
return pd.DataFrame({'action' : self.actions, # current action
'nav' : self.navs, # starting Net Asset Value (NAV)
'market_nav' : self.market_navs,
'market_return' : self.market_returns,
'strategy_return': self.strategy_returns,
'position' : self.positions, # eod position
'cost' : self.costs, # eod costs
'trade' : self.trades})  # eod trade
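# Worked example for TradingSimulator.take_step (assumed numbers): if yesterday's
# position was +1 (long), today's market return is 0.01 and no trade is made, the
# cost is just time_cost_bps, so reward = 1 * 0.01 - 0.0001 = 0.0099; the NAV is
# then compounded with the previous day's strategy return on the next call.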
class TradingEnvironment(gym.Env):
"""A simple trading environment for reinforcement learning.
Provides daily observations for a stock price series
An episode is defined as a sequence of 252 trading days with random start
    Each day is a 'step' that allows the agent to choose one of three actions:
SHORT (0)
FLAT (1)
LONG (2)
Trades cost 10bps of the change in position value.
Going from short to long implies two trades.
    Not trading also incurs a default time cost of 1bps per step.
An episode begins with a starting Net Asset Value (NAV) of 1 unit of cash.
    If the NAV drops to 0, the episode ends with a loss.
If the NAV hits 2.0, the agent wins.
The trading simulator tracks a buy-and-hold strategy as benchmark.
"""
metadata = {'render.modes': ['human']}
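    # Illustrative gym-style interaction (assumed usage, mirroring the docstring above):
    #   env = TradingEnvironment(ticker='AAPL')
    #   obs = env.reset()
    #   done = False
    #   while not done:
    #       action = env.action_space.sample()      # SHORT=0, FLAT=1, LONG=2
    #       obs, reward, done, info = env.step(action)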
def __init__(self, trading_days=252, trading_cost_bps=1e-3, time_cost_bps=1e-4, ticker='AAPL'):
self.trading_days = trading_days
self.ticker = ticker
self.trading_cost_bps = trading_cost_bps
self.time_cost_bps = time_cost_bps
self.src = DataSource(trading_days=self.trading_days, ticker=ticker)
self.sim = TradingSimulator(steps=self.trading_days,
trading_cost_bps=self.trading_cost_bps,
time_cost_bps=self.time_cost_bps)
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Box(self.src.min_values,
self.src.max_values)
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""Returns state observation, reward, done and info"""
assert self.action_space.contains(action), '{} {} invalid'.format(action, type(action))
observation, done = self.src.take_step()
reward, info = self.sim.take_step(action=action,
market_return=observation[2])
return observation, reward, done, info
def reset(self):
"""Resets DataSource and TradingSimulator; returns first observation"""
self.src.reset()
self.sim.reset()
return self.src.take_step()[0]
# TODO
def render(self, mode='human'):
"""Not implemented"""
pass
def run_strategy(self, strategy, return_df=True):
"""Runs strategy, returns DataFrame with all steps"""
observation = self.reset()
done = False
while not done:
action = strategy(observation, self) # call strategy
observation, reward, done, info = self.step(action)
return self.sim.result() if return_df else None
def run_strategy_episodes(self, strategy, episodes=1, write_log=True, return_df=True):
""" run provided strategy the specified # of times, possibly
writing a log and possibly returning a dataframe summarizing activity.
Note that writing the log is expensive and returning the df is more so.
For training purposes, you might not want to set both.
"""
logfile = None
if write_log:
logfile = tempfile.NamedTemporaryFile(delete=False, mode='w+')
log.info('writing log to %s', logfile.name)
need_df = write_log or return_df
alldf = None
for i in range(episodes):
df = self.run_strategy(strategy, return_df=need_df)
if write_log:
df.to_csv(logfile, mode='ab')
if return_df:
alldf = df if alldf is None else | pd.concat([alldf, df], axis=0) | pandas.concat |
import os
import pandas as pd
import numpy as np
import copy
from pprint import pprint
def work(pres):
count = [0, 0]
for i in pres:
count[i] += 1
out = count.index(max(count))
return out
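# Example of the majority vote above (binary labels only): work([1, 0, 1, 1, 0])
# counts two 0s and three 1s and returns 1; on a tie, count.index(max(count))
# resolves to label 0.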
def simple_vote(model_name, date, dataset, pseudo=False):
if pseudo:
DATA_DIR = '../predict_data/{}_{}_pseudo/{}/'.format(model_name, date, dataset)
else:
DATA_DIR = '../predict_data/{}_{}/{}/'.format(model_name, date, dataset)
files = os.listdir(DATA_DIR)
files = [i for i in files]
i = 0
for fname in files:
tmp_df = pd.read_csv(DATA_DIR + fname)
tmp_df = pd.DataFrame(tmp_df, columns=['ID', 'Label'])
if i == 0:
df_merged = pd.read_csv(DATA_DIR + fname)
df_merged = pd.DataFrame(df_merged, columns=['ID', 'Label'])
if i > 0:
df_merged = df_merged.merge(tmp_df, how='left', on='ID')
print(df_merged.shape)
i += 1
tmp_label = np.array(df_merged.iloc[:, 1:])
voted_label = [work(line) for line in tmp_label]
df_summit = df_merged[['ID']]
df_summit = df_summit.copy()
df_summit['Label'] = voted_label
if pseudo:
save_path = '../predict_data/{}_{}_pseudo/vote'.format(model_name, date)
else:
save_path = '../predict_data/{}_{}/vote'.format(model_name, date)
if not os.path.exists(save_path):
os.makedirs(save_path, mode=0o777)
file_path = '{}/{}-{}-voted.csv'.format(save_path, model_name, dataset)
df_summit.to_csv(file_path, index=None)
    print("Results written successfully!")
def aug_vote(model_name, date, dataset, pseudo=False):
if pseudo:
DATA_DIR = '../predict_data/aug_data/{}_{}_pseudo/{}/'.format(model_name, date, dataset)
else:
DATA_DIR = '../predict_data/aug_data/{}_{}/{}/'.format(model_name, date, dataset)
files = os.listdir(DATA_DIR)
files = [i for i in files]
i = 0
for fname in files:
tmp_df = pd.read_csv(DATA_DIR + fname)
tmp_df = pd.DataFrame(tmp_df, columns=['ID', 'Label'])
if i == 0:
df_merged = pd.read_csv(DATA_DIR + fname)
df_merged = pd.DataFrame(df_merged, columns=['ID', 'Label'])
if i > 0:
df_merged = df_merged.merge(tmp_df, how='left', on='ID')
print(df_merged.shape)
i += 1
df_data = pd.read_csv('../data/test_data/cn_test.csv', sep=',')
# df_data = pd.read_csv('../data/test_data/cn_test.csv', sep='\t', names=["ID", "Speaker", "Sentence"])
ID_list = [i for i in range(df_data.shape[0])]
df_data['ID'] = pd.Series(ID_list)
df_merged = df_merged.merge(df_data, how='left', on='ID')
speaker_list, sentence_list, label_list = [], [], []
humor_speaker_list, humor_sentence_list, humor_label_list = [], [], []
un_speaker_list, un_sentence_list, un_label_list = [], [], []
for index, line in df_merged.iterrows():
label_1 = int(line[1])
label_2 = int(line[2])
label_3 = int(line[3])
label_4 = int(line[4])
label_5 = int(line[5])
speaker = line[8]
sentence = line[9]
label = None
if label_1 + label_2 + label_3 + label_4 + label_5 == 5:
label = 1
humor_speaker_list.append(speaker)
humor_sentence_list.append(sentence)
humor_label_list.append(label)
elif label_1 == label_2 == label_3 == label_4 == label_5 == 0:
label = 0
un_speaker_list.append(speaker)
un_sentence_list.append(sentence)
un_label_list.append(label)
if label is not None:
speaker_list.append(speaker)
sentence_list.append(sentence)
label_list.append(label)
print(len(speaker_list), len(sentence_list), len(label_list))
print(len(humor_speaker_list), len(humor_sentence_list), len(humor_label_list))
print(len(un_speaker_list), len(un_sentence_list), len(un_label_list))
idx_list = [i for i in range(len(speaker_list))]
humor_idx_list = [i for i in range(len(humor_speaker_list))]
un_idx_list = [i for i in range(len(un_speaker_list))]
    # * tsv format
final_data = list(zip(idx_list, speaker_list, sentence_list, label_list))
final_data = pd.DataFrame(final_data, columns=['ID', 'Speaker', 'Sentence', 'Label'])
humor_final_data = list(zip(humor_idx_list, humor_speaker_list, humor_sentence_list, humor_label_list))
humor_final_data = | pd.DataFrame(humor_final_data, columns=['ID', 'Speaker', 'Sentence', 'Label']) | pandas.DataFrame |
import re
import logging
from functools import reduce, partial
from concurrent import futures
from concurrent.futures import ThreadPoolExecutor, as_completed
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype
from influxdb.resultset import ResultSet
from requests.exceptions import RequestException
from .connection import get_client, InfluxDBException, _timeout
from .util import aslist, asstr
from .db import _check_table, _CATEGORICAL_COLUMNS, AGGREGATE
from . import db
__all__ = ['query', 'query_async', 'getdf']
log = logging.getLogger(__name__)
def query(query: str, **kwargs) -> ResultSet:
"""
Fetch results of a raw SQL query.
Parameters
----------
query : str
An SQL query to fetch results for.
kwargs :
Passed to ``influxdb.client.InfluxDBClient``.
Returns
-------
influxdb.resultset.ResultSet
"""
try:
client = get_client()
except InfluxDBException:
log.exception('Failed to instantiate InfluxDB client:')
raise
kwargs.setdefault('epoch', 'ms')
try:
log.debug('Executing query: %s', query)
result = client.query(query, **kwargs)
log.debug('Result set size: %d, %d rows', len(result), len(tuple(result.get_points())))
return result
except RequestException:
log.error('Failed to execute query in %d seconds: %s', _timeout, query)
raise
except InfluxDBException:
log.error('Failed to execute query: %s', query)
raise
def query_async(queries: list, callback=None, **kwargs) -> ResultSet:
"""
Generator fetching results of SQL queries in an asynchronous manner.
Parameters
----------
queries : list of str
An list of SQL queries to fetch results for.
callback : callable
The function to call after each successfully executed query.
kwargs :
Passed to ``influxdb.client.InfluxDBClient``.
Yields
------
influxdb.resultset.ResultSet
"""
if isinstance(queries, str):
queries = [queries]
with ThreadPoolExecutor(max_workers=len(queries)) as executor:
try:
for future in as_completed((executor.submit(query, query_str, **kwargs)
for query_str in queries),
# +1 to allow InfluxDBClient (requests) to fail first
timeout=_timeout + 1):
yield future.result()
if callback:
callback()
except (futures.TimeoutError, RequestException):
log.error("Failed to execute all queries in %d seconds: %s", _timeout, queries)
raise
def _query_str(table, *, freq, columns='', where='', resample='', limit=1000):
parts = ['SELECT {columns} FROM {table}_{freq}'.format(
columns=asstr(columns) or (table._select_agg() if resample else '*'),
table=str(table),
freq=freq)]
if where:
where = aslist(where, str)
parts.append('WHERE ' + ' AND '.join(where))
if resample:
resample = 'time({}), '.format(resample)
parts.append('GROUP BY ' + (resample + table._groupby()).lstrip(','))
if limit:
parts.append('LIMIT ' + str(int(limit)))
query_str = ' '.join(parts)
return query_str
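# Illustrative result of _query_str (assumed arguments; the column and GROUP BY
# fragments come from the table object defined in .db):
#   _query_str(table, freq='1h', where='time > now() - 7d', resample='10m')
#   -> "SELECT <aggregated columns> FROM <table>_1h WHERE time > now() - 7d
#       GROUP BY time(10m), <tag columns> LIMIT 1000"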
def merge_asof_helper(left, right, tolerance=None):
#https://github.com/pandas-dev/pandas/issues/16454 pandas doesnt allow multiple pd.Categorical "by" values?, dirty hacks
if 'time' in left.columns.values.tolist():
left.time = pd.to_datetime(left.time, unit='ms')
left.set_index('time', inplace=True)
left.sort_index(inplace=True)
right.time = | pd.to_datetime(right.time, unit='ms') | pandas.to_datetime |
import os
import pandas as pd
import mysql.connector as mysql
from mysql.connector import Error
def DBConnect(dbName=None):
"""
Parameters
----------
dbName :
        (Default value = None)
Returns
-------
"""
conn = mysql.connect(host='localhost', user='root', password='<PASSWORD>',
database=dbName, buffered=True)
cur = conn.cursor()
return conn, cur
def emojiDB(dbName: str) -> None:
conn, cur = DBConnect(dbName)
dbQuery = f"ALTER DATABASE {dbName} CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_ci;"
cur.execute(dbQuery)
conn.commit()
def createDB(dbName: str) -> None:
"""
    Parameters
    ----------
    dbName : str
Returns
-------
"""
conn, cur = DBConnect()
cur.execute(f"CREATE DATABASE IF NOT EXISTS {dbName};")
conn.commit()
cur.close()
def createTables(dbName: str) -> None:
"""
    Parameters
    ----------
    dbName : str
Returns
-------
"""
conn, cur = DBConnect(dbName)
sqlFile = 'mysql/I80_davis_schema.sql'
fd = open(sqlFile, 'r')
readSqlFile = fd.read()
fd.close()
sqlCommands = readSqlFile.split(';')
for command in sqlCommands:
try:
res = cur.execute(command)
except Exception as ex:
print("Command skipped: ", command)
print(ex)
conn.commit()
cur.close()
return
# def preprocess_df(df: pd.DataFrame) -> pd.DataFrame:
# """
# Parameters
# ----------
# df :
# pd.DataFrame:
# df :
# pd.DataFrame:
# df:pd.DataFrame :
# Returns
# -------
# """
# cols_2_drop = ['Unnamed: 0', 'timestamp', 'sentiment', 'possibly_sensitive', 'original_text']
# try:
# df = df.drop(columns=cols_2_drop, axis=1)
# df = df.fillna(0)
# except KeyError as e:
# print("Error:", e)
# return df
def insert_to_I80_davis_t_table(dbName: str, df: pd.DataFrame, table_name: str) -> None:
"""
    Parameters
    ----------
    dbName : str
    df : pd.DataFrame
    table_name : str
Returns
-------
"""
conn, cur = DBConnect(dbName)
# df = preprocess_df(df)
for _, row in df.iterrows():
sqlQuery = f"""INSERT INTO {table_name} (timestamp,ID,avg_speed,avg_flow,avg_occ,avg_freeflow_speed,samples_below_100pct_ff,
samples_below_95pct_ff,samples_below_90pct_ff,samples_below_85pct_ff,samples_below_80pct_ff,
samples_below_75pct_ff,samples_below_70pct_ff,samples_below_65pct_ff,samples_below_60pct_ff,
samples_below_55pct_ff,samples_below_50pct_ff,samples_below_45pct_ff,samples_below_40pct_ff,
samples_below_35pct_ff,samples_below_30pct_ff,samples_below_20pct_ff,samples_below_25pct_ff,
samples_below_15pct_ff,samples_below_10pct_ff)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"""
data = (row[0], row[1], row[2], row[3], (row[4]), (row[5]), row[6], row[7], row[8], row[9], row[10], row[11],
row[12], row[13], row[14], row[15],row[16], row[17], row[18], (row[19]), (row[20]), row[21],row[22],
row[23], row[24])
try:
# Execute the SQL command
cur.execute(sqlQuery, data)
# Commit your changes in the database
conn.commit()
print("Data Inserted Successfully")
except Exception as e:
conn.rollback()
print("Error: ", e)
return
def insert_to_richards_table(dbName: str, df: pd.DataFrame, table_name: str) -> None:
"""
    Parameters
    ----------
    dbName : str
    df : pd.DataFrame
    table_name : str
Returns
-------
"""
conn, cur = DBConnect(dbName)
# df = preprocess_df(df)
for _, row in df.iterrows():
sqlQuery = f"""INSERT INTO {table_name} (timestamp,flow1,occupancy1,flow2,occupancy2,flow3,occupancy3,totalflow,weekday,hour,minute,second)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"""
data = (row[0], row[1], row[2], row[3], (row[4]), (row[5]), row[6], row[7], row[8], row[9], row[10], row[11])
try:
# Execute the SQL command
cur.execute(sqlQuery, data)
# Commit your changes in the database
conn.commit()
print("Data Inserted Successfully")
except Exception as e:
conn.rollback()
print("Error: ", e)
return
def insert_to_station_summary_table(dbName: str, df: pd.DataFrame, table_name: str) -> None:
"""
    Parameters
    ----------
    dbName : str
    df : pd.DataFrame
    table_name : str
Returns
-------
"""
conn, cur = DBConnect(dbName)
# df = preprocess_df(df)
for _, row in df.iterrows():
sqlQuery = f"""INSERT INTO {table_name} (ID,flow_99,flow_max,flow_median,flow_total,n_obs)
VALUES(%s, %s, %s, %s, %s, %s);"""
data = (row[0], row[1], row[2], row[3], row[4], row[5])
try:
# Execute the SQL command
cur.execute(sqlQuery, data)
# Commit your changes in the database
conn.commit()
print("Data Inserted Successfully")
except Exception as e:
conn.rollback()
print("Error: ", e)
return
def insert_to_weekday_table(dbName: str, df: pd.DataFrame, table_name: str) -> None:
"""
    Parameters
    ----------
    dbName : str
    df : pd.DataFrame
    table_name : str
Returns
-------
"""
conn, cur = DBConnect(dbName)
# df = preprocess_df(df)
for _, row in df.iterrows():
        sqlQuery = f"""INSERT INTO {table_name} (ID,hour,minute,second,`Unnamed: 4`,totalflow)
VALUES(%s, %s, %s, %s, %s, %s);"""
data = (row[0], row[1], row[2], row[3], row[4], row[5])
try:
# Execute the SQL command
cur.execute(sqlQuery, data)
# Commit your changes in the database
conn.commit()
print("Data Inserted Successfully")
except Exception as e:
conn.rollback()
print("Error: ", e)
return
def insert_to_I80_median_table(dbName: str, df: pd.DataFrame, table_name: str) -> None:
"""
    Parameters
    ----------
    dbName : str
    df : pd.DataFrame
    table_name : str
Returns
-------
"""
conn, cur = DBConnect(dbName)
# df = preprocess_df(df)
for _, row in df.iterrows():
sqlQuery = f"""INSERT INTO {table_name} (ID,weekday,hour,minute,second,flow1,occupancy1,mph1,flow2,
occupancy2,mph2,flow3,occupancy3,mph3,flow4,occupancy4,mph4,flow5,occupancy5,mph5,totalflow)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"""
data = (row[0], row[1], row[2], row[3], (row[4]), (row[5]), row[6], row[7], row[8], row[9], row[10], row[11],
row[12], row[13], row[14], row[15],row[16], row[17], row[18], row[19], row[20])
try:
# Execute the SQL command
cur.execute(sqlQuery, data)
# Commit your changes in the database
conn.commit()
print("Data Inserted Successfully")
except Exception as e:
conn.rollback()
print("Error: ", e)
return
def insert_to_I80_stations_table(dbName: str, df: pd.DataFrame, table_name: str) -> None:
"""
    Parameters
    ----------
    dbName : str
    df : pd.DataFrame
    table_name : str
Returns
-------
"""
conn, cur = DBConnect(dbName)
# df = preprocess_df(df)
for _, row in df.iterrows():
sqlQuery = f"""INSERT INTO {table_name} (ID,Fwy,Dir,District,County,City,State_PM,Abs_PM,Latitude,
Longitude,Length,Type,Lanes,Name,User_ID_1,User_ID_2,User_ID_3,User_ID_4)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"""
data = (row[0], row[1], row[2], row[3], (row[4]), (row[5]), row[6], row[7], row[8], row[9], row[10], row[11],
row[12], row[13], row[14], row[15],row[16], row[17])
try:
# Execute the SQL command
cur.execute(sqlQuery, data)
# Commit your changes in the database
conn.commit()
print("Data Inserted Successfully")
except Exception as e:
conn.rollback()
print("Error: ", e)
return
def db_execute_fetch(*args, many=False, tablename='', rdf=True, **kwargs) -> pd.DataFrame:
"""
Parameters
----------
*args :
many :
(Default value = False)
tablename :
(Default value = '')
rdf :
(Default value = True)
**kwargs :
Returns
-------
"""
connection, cursor1 = DBConnect(**kwargs)
if many:
cursor1.executemany(*args)
else:
cursor1.execute(*args)
# get column names
field_names = [i[0] for i in cursor1.description]
# get column values
res = cursor1.fetchall()
# get row count and show info
nrow = cursor1.rowcount
if tablename:
print(f"{nrow} records fetched from {tablename} table")
cursor1.close()
connection.close()
# return result
if rdf:
return pd.DataFrame(res, columns=field_names)
else:
return res
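# Illustrative helper (not part of the original module): a minimal read-back of data
# inserted by the __main__ block below. The table and database names are assumptions
# based on that block and must match the actual schema file.
def fetch_sample_rows(table='richards', limit=10):
    sql = f"SELECT * FROM {table} LIMIT {limit};"
    return db_execute_fetch(sql, tablename=table, rdf=True, dbName='I80_davis')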
if __name__ == "__main__":
createDB(dbName='I80_davis')
emojiDB(dbName='I80_davis')
createTables(dbName='I80_davis')
richards = pd.read_csv('data/richards.csv')
I80_stations = pd.read_csv('data/I80_stations.csv')
station = | pd.read_csv('data/station_summary.csv') | pandas.read_csv |
import itertools
import warnings
import networkx as nx
import numpy as np
import pandas as pd
from tqdm import tqdm
from AppGenerator import AppGenerator
from ServerlessAppWorkflow import ServerlessAppWorkflow
warnings.filterwarnings("ignore")
class PerfOpt:
def __init__(self, Appworkflow, generate_perf_profile=True, mem_list=None):
self.App = Appworkflow
self.appgenerator = AppGenerator(seed=16, type='4PL')
if mem_list is None:
self.mem_list = [128, 192, 256, 320, 384, 448, 512, 576, 640, 704, 768, 832, 896, 960, 1024, 1088, 1152,
1216,
1280, 1344, 1408, 1472, 1536, 1600, 1664, 1728, 1792, 1856, 1920, 1984, 2048, 2112, 2176,
2240,
2304, 2368, 2432, 2496, 2560, 2624, 2688, 2752, 2816, 2880, 2944, 3008]
else:
self.mem_list = mem_list
if generate_perf_profile:
self.generate_perf_profile()
self.minimal_mem_configuration, self.maximal_mem_configuration, self.maximal_cost, self.minimal_avg_rt, self.minimal_cost, self.maximal_avg_rt = self.get_optimization_boundary()
self.update_BCR()
self.all_simple_paths = [path for path in
nx.all_simple_paths(self.App.deloopedG, self.App.startPoint, self.App.endPoint)]
self.simple_paths_num = len(self.all_simple_paths)
self.CPcounter = 0
# Generate performance curve for each node in the workflow
def generate_perf_profile(self):
node_list = [item for item in self.App.workflowG.nodes]
node_list.remove('Start')
node_list.remove('End')
nx.set_node_attributes(self.App.workflowG, {}, 'perf_profile')
for node in node_list:
self.App.workflowG.nodes[node]['perf_profile'] = self.appgenerator.gen_rt_mem_data(node)
# Update mem and rt attributes of each node in the workflow
def update_mem_rt(self, G, mem_dict):
for node in mem_dict:
G.nodes[node]['mem'] = mem_dict[node]
G.nodes[node]['rt'] = G.nodes[node]['perf_profile'][mem_dict[node]]
# Update mem and rt attributes of each node in the workflow
def update_App_workflow_mem_rt(self, App, mem_dict):
self.update_mem_rt(App.workflowG, mem_dict)
App.updateRT()
def get_perf_cost_table(self, file, start_iterations=1, end_iterations=None):
'''
Enumerate all possible combinations of memory. For each combination, calculate the end-to-end response time and average cost.
Save the results into a csv.
Args:
file (string): the name of the output csv to be saved
start_iterations (int): the start iterations e.g. 1 == start from the first iteration, 2 == start from the second iteration
end_iterations (int): the end iterations e.g. 10 == end after finishing the 10th iteration
'''
data = pd.DataFrame()
self.App.update_NE()
node_list = [item for item in self.App.workflowG.nodes]
node_list.remove('Start')
node_list.remove('End')
all_available_mem_list = []
for node in node_list:
all_available_mem_list.append(
[item for item in np.sort(list(self.App.workflowG.nodes[node]['perf_profile'].keys()))])
if (end_iterations != None):
task_size = end_iterations - start_iterations + 1
else:
task_size = np.prod([len(item) for item in all_available_mem_list]) - start_iterations + 1
mem_configurations = itertools.product(*all_available_mem_list)
for i in range(start_iterations - 1):
next(mem_configurations)
iterations_count = start_iterations - 1
print('Get Performance Cost Table - Task Size: {}'.format(task_size))
if (end_iterations != None):
with tqdm(total=task_size) as pbar:
for mem_config in mem_configurations:
iterations_count += 1
current_mem_config = dict(zip(node_list, mem_config))
self.update_App_workflow_mem_rt(self.App, current_mem_config)
current_cost = self.App.get_avg_cost()
self.App.get_simple_dag()
current_rt = self.App.get_avg_rt()
aRow = current_mem_config
aRow['Cost'] = current_cost
aRow['RT'] = current_rt
aRow = pd.Series(aRow).rename(iterations_count)
data = data.append(aRow)
pbar.update()
if (iterations_count >= end_iterations):
break
else:
with tqdm(total=task_size) as pbar:
for mem_config in mem_configurations:
iterations_count += 1
current_mem_config = dict(zip(node_list, mem_config))
self.update_App_workflow_mem_rt(self.App, current_mem_config)
current_cost = self.App.get_avg_cost()
self.App.get_simple_dag()
current_rt = self.App.get_avg_rt()
aRow = current_mem_config
aRow['Cost'] = current_cost
aRow['RT'] = current_rt
aRow = pd.Series(aRow).rename(iterations_count)
data = data.append(aRow)
pbar.update()
data.to_csv(file, index=True)
def get_optimization_boundary(self):
node_list = [item for item in self.App.workflowG.nodes]
minimal_mem_configuration = {node: min(self.App.workflowG.nodes[node]['perf_profile'].keys()) for node in
node_list}
maximal_mem_configuration = {node: max(self.App.workflowG.nodes[node]['perf_profile'].keys()) for node in
node_list}
self.App.update_NE()
self.update_App_workflow_mem_rt(self.App, maximal_mem_configuration)
maximal_cost = self.App.get_avg_cost()
self.App.get_simple_dag()
minimal_avg_rt = self.App.get_avg_rt()
self.update_App_workflow_mem_rt(self.App, minimal_mem_configuration)
minimal_cost = self.App.get_avg_cost()
self.App.get_simple_dag()
maximal_avg_rt = self.App.get_avg_rt()
return (minimal_mem_configuration, maximal_mem_configuration, maximal_cost, minimal_avg_rt, minimal_cost,
maximal_avg_rt)
# Get the Benefit Cost Ratio (absolute value) of each function
def update_BCR(self):
node_list = [item for item in self.App.workflowG.nodes]
for node in node_list:
available_mem_list = [item for item in np.sort(list(self.App.workflowG.nodes[node]['perf_profile'].keys()))]
available_rt_list = [self.App.workflowG.nodes[node]['perf_profile'][item] for item in available_mem_list]
slope, intercept = np.linalg.lstsq(np.vstack([available_mem_list, np.ones(len(available_mem_list))]).T,
np.array(available_rt_list), rcond=None)[0]
self.App.workflowG.nodes[node]['BCR'] = np.abs(slope)
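    # Illustrative reading of the BCR above (assumed numbers): if a function's runtime
    # drops roughly linearly from 1000 ms at 128 MB to 200 ms at 1024 MB, the fitted
    # slope is about (200 - 1000) / (1024 - 128) = -0.89 ms/MB, so BCR = 0.89, i.e.
    # the absolute runtime reduction gained per additional MB of memory.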
# Find the probability refined critical path in self.App
def find_PRCP(self, order=0, leastCritical=False):
self.CPcounter += 1
tp_list = self.App.getTP(self.App.deloopedG, self.all_simple_paths)
rt_list = self.App.sumRT_with_NE(self.all_simple_paths, includeStartNode=True, includeEndNode=True)
prrt_list = np.multiply(tp_list, rt_list)
if (leastCritical):
PRCP = np.argsort(prrt_list)[order]
else:
PRCP = np.argsort(prrt_list)[-1 - order]
return (self.all_simple_paths[PRCP])
# Update the list of available memory configurations in ascending order
def update_available_mem_list(self, BCR=False, BCRthreshold=0.1, BCRinverse=False):
node_list = [item for item in self.App.workflowG.nodes]
for node in node_list:
if (BCR):
available_mem_list = [item for item in
np.sort(list(self.App.workflowG.nodes[node]['perf_profile'].keys()))]
mem_zip = [item for item in zip(available_mem_list, available_mem_list[1:])]
if (BCRinverse):
available_mem_list = [item for item in mem_zip if np.abs((item[1] - item[0]) / (
self.App.workflowG.nodes[node]['perf_profile'][item[1]] -
self.App.workflowG.nodes[node]['perf_profile'][item[0]])) > 1.0 / (
self.App.workflowG.nodes[node]['BCR']) * BCRthreshold]
else:
available_mem_list = [item for item in mem_zip if np.abs((self.App.workflowG.nodes[node][
'perf_profile'][item[1]] -
self.App.workflowG.nodes[node][
'perf_profile'][item[0]]) / (
item[1] - item[0])) >
self.App.workflowG.nodes[node]['BCR'] * BCRthreshold]
available_mem_list = list(np.sort(list(set(itertools.chain(*available_mem_list)))))
else:
available_mem_list = [item for item in
np.sort(list(self.App.workflowG.nodes[node]['perf_profile'].keys()))]
self.App.workflowG.nodes[node]['available_mem'] = available_mem_list # Sorted list
def PRCPG_BPBC(self, budget, BCR=False, BCRtype="RT/M", BCRthreshold=0.1):
'''
Probability Refined Critical Path Algorithm - Minimal end-to-end response time under a budget constraint
Best Performance under budget constraint
Args:
budget (float): the budge constraint
BCR (bool): True - use benefit-cost ratio optimization False - not use BCR optimization
BCRtype (string): 'RT/M' - Benefit is RT, Cost is Mem. Eliminate mem configurations which do not conform to BCR limitations.
The greedy strategy is to select the config with maximal RT reduction.
'ERT/C' - Benefit is the reduction on end-to-end response time, Cost is increased cost.
The greedy strategy is to select the config with maximal RT reduction.
'MAX' - Benefit is the reduction on end-to-end response time, Cost is increased cost.
The greedy strategy is to select the config with maximal BCR
BCRthreshold (float): The threshold of BCR cut off
'''
if BCRtype == 'rt-mem':
BCRtype = 'RT/M'
elif BCRtype == 'e2ert-cost':
BCRtype = 'ERT/C'
elif BCRtype == 'max':
BCRtype = 'MAX'
if (BCR and BCRtype == "RT/M"):
self.update_available_mem_list(BCR=True, BCRthreshold=BCRthreshold, BCRinverse=False)
else:
self.update_available_mem_list(BCR=False)
        cost = self.minimal_cost
surplus = budget - cost
self.update_App_workflow_mem_rt(self.App, self.minimal_mem_configuration)
current_avg_rt = self.maximal_avg_rt
current_cost = self.minimal_cost
last_e2ert_cost_BCR = 0
order = 0
iterations_count = 0
while (round(surplus, 4) >= 0):
iterations_count += 1
cp = self.find_PRCP(order=order, leastCritical=False)
max_avg_rt_reduction_of_each_node = {}
mem_backup = nx.get_node_attributes(self.App.workflowG, 'mem')
for node in cp:
avg_rt_reduction_of_each_mem_config = {}
for mem in reversed(self.App.workflowG.nodes[node]['available_mem']):
if (mem <= mem_backup[node]):
break
self.update_App_workflow_mem_rt(self.App, {node: mem})
increased_cost = self.App.get_avg_cost() - current_cost
if (increased_cost < surplus):
self.App.get_simple_dag()
rt_reduction = current_avg_rt - self.App.get_avg_rt()
if (rt_reduction > 0):
avg_rt_reduction_of_each_mem_config[mem] = (rt_reduction, increased_cost)
self.update_App_workflow_mem_rt(self.App, {node: mem_backup[node]})
if (BCR and BCRtype == "ERT/C"):
avg_rt_reduction_of_each_mem_config = {item: avg_rt_reduction_of_each_mem_config[item] for item in
avg_rt_reduction_of_each_mem_config.keys() if
avg_rt_reduction_of_each_mem_config[item][0] /
avg_rt_reduction_of_each_mem_config[item][
1] > last_e2ert_cost_BCR * BCRthreshold}
if (BCR and BCRtype == "MAX"):
avg_rt_reduction_of_each_mem_config = {item: (
avg_rt_reduction_of_each_mem_config[item][0], avg_rt_reduction_of_each_mem_config[item][1],
avg_rt_reduction_of_each_mem_config[item][0] / avg_rt_reduction_of_each_mem_config[item][1]) for
item in avg_rt_reduction_of_each_mem_config.keys()}
if (len(avg_rt_reduction_of_each_mem_config) != 0):
if (BCR and BCRtype == "MAX"):
max_BCR = np.max([item[2] for item in avg_rt_reduction_of_each_mem_config.values()])
max_rt_reduction_under_MAX_BCR = np.max(
[item[0] for item in avg_rt_reduction_of_each_mem_config.values() if
item[2] == max_BCR])
min_increased_cost_under_MAX_rt_reduction_MAX_BCR = np.min(
[item[1] for item in avg_rt_reduction_of_each_mem_config.values() if
item[0] == max_rt_reduction_under_MAX_BCR and item[2] == max_BCR])
reversed_dict = dict(zip(avg_rt_reduction_of_each_mem_config.values(),
avg_rt_reduction_of_each_mem_config.keys()))
max_avg_rt_reduction_of_each_node[node] = (reversed_dict[(
max_rt_reduction_under_MAX_BCR, min_increased_cost_under_MAX_rt_reduction_MAX_BCR,
max_BCR)],
max_rt_reduction_under_MAX_BCR,
min_increased_cost_under_MAX_rt_reduction_MAX_BCR,
max_BCR)
else:
max_rt_reduction = np.max([item[0] for item in avg_rt_reduction_of_each_mem_config.values()])
min_increased_cost_under_MAX_rt_reduction = np.min(
[item[1] for item in avg_rt_reduction_of_each_mem_config.values() if
item[0] == max_rt_reduction])
reversed_dict = dict(zip(avg_rt_reduction_of_each_mem_config.values(),
avg_rt_reduction_of_each_mem_config.keys()))
max_avg_rt_reduction_of_each_node[node] = (
reversed_dict[(max_rt_reduction, min_increased_cost_under_MAX_rt_reduction)],
max_rt_reduction,
min_increased_cost_under_MAX_rt_reduction)
if (len(max_avg_rt_reduction_of_each_node) == 0):
if (order >= self.simple_paths_num - 1):
break
else:
order += 1
continue
if (BCR and BCRtype == "MAX"):
max_BCR = np.max([item[3] for item in max_avg_rt_reduction_of_each_node.values()])
max_rt_reduction_under_MAX_BCR = np.max(
[item[1] for item in max_avg_rt_reduction_of_each_node.values() if item[3] == max_BCR])
target_node = [key for key in max_avg_rt_reduction_of_each_node if
max_avg_rt_reduction_of_each_node[key][3] == max_BCR and
max_avg_rt_reduction_of_each_node[key][1] == max_rt_reduction_under_MAX_BCR][0]
target_mem = max_avg_rt_reduction_of_each_node[target_node][0]
else:
max_rt_reduction = np.max([item[1] for item in max_avg_rt_reduction_of_each_node.values()])
min_increased_cost_under_MAX_rt_reduction = np.min(
[item[2] for item in max_avg_rt_reduction_of_each_node.values() if item[1] == max_rt_reduction])
target_mem = np.min([item[0] for item in max_avg_rt_reduction_of_each_node.values() if
item[1] == max_rt_reduction and item[
2] == min_increased_cost_under_MAX_rt_reduction])
target_node = [key for key in max_avg_rt_reduction_of_each_node if
max_avg_rt_reduction_of_each_node[key] == (
target_mem, max_rt_reduction, min_increased_cost_under_MAX_rt_reduction)][0]
self.update_App_workflow_mem_rt(self.App, {target_node: target_mem})
max_rt_reduction = max_avg_rt_reduction_of_each_node[target_node][1]
min_increased_cost_under_MAX_rt_reduction = max_avg_rt_reduction_of_each_node[target_node][2]
current_avg_rt = current_avg_rt - max_rt_reduction
surplus = surplus - min_increased_cost_under_MAX_rt_reduction
current_cost = self.App.get_avg_cost()
current_e2ert_cost_BCR = max_rt_reduction / min_increased_cost_under_MAX_rt_reduction
if (current_e2ert_cost_BCR == float('Inf')):
last_e2ert_cost_BCR = 0
else:
last_e2ert_cost_BCR = current_e2ert_cost_BCR
current_mem_configuration = nx.get_node_attributes(self.App.workflowG, 'mem')
del current_mem_configuration['Start']
del current_mem_configuration['End']
print('Optimized Memory Configuration: {}'.format(current_mem_configuration))
print('Average end-to-end response time: {}'.format(current_avg_rt))
print('Average Cost: {}'.format(current_cost))
print('PRCP_BPBC Optimization Completed.')
return (current_avg_rt, current_cost, current_mem_configuration, iterations_count)
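    # Illustrative call (assumed values; the workflow object comes from
    # ServerlessAppWorkflow, as in __init__ above):
    #   opt = PerfOpt(app)
    #   avg_rt, cost, mem_config, iters = opt.PRCPG_BPBC(budget=5.0, BCR=True, BCRtype='MAX')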
def PRCPG_BCPC(self, rt_constraint, BCR=False, BCRtype="M/RT", BCRthreshold=0.1):
'''
Probability Refined Critical Path Algorithm - Minimal cost under an end-to-end response time constraint
Best cost under performance (end-to-end response time) constraint
Args:
rt_constraint (float): End-to-end response time constraint
BCR (bool): True - use benefit-cost ratio optimization False - not use BCR optimization
BCRtype (string): 'M/RT' - Benefit is Mem, Cost is RT. (inverse) Eliminate mem configurations which do not conform to BCR limitations
'C/ERT' - Benefit is the cost reduction, Cost is increased ERT.
'MAX' - Benefit is the cost reduction, Cost is increased ERT. The greedy strategy is to select the config with maximal BCR
BCRthreshold (float): The threshold of BCR cut off
'''
if BCRtype == 'rt-mem':
BCRtype = 'M/RT'
elif BCRtype == 'e2ert-cost':
BCRtype = 'C/ERT'
elif BCRtype == 'max':
BCRtype = 'MAX'
if (BCR and BCRtype == "M/RT"):
self.update_available_mem_list(BCR=True, BCRthreshold=BCRthreshold, BCRinverse=True)
else:
self.update_available_mem_list(BCR=False)
self.update_App_workflow_mem_rt(self.App, self.maximal_mem_configuration)
current_avg_rt = self.minimal_avg_rt
performance_surplus = rt_constraint - current_avg_rt
current_cost = self.maximal_cost
last_e2ert_cost_BCR = 0
order = 0
iterations_count = 0
while (round(performance_surplus, 4) >= 0):
iterations_count += 1
cp = self.find_PRCP(leastCritical=True, order=order)
max_cost_reduction_of_each_node = {}
mem_backup = nx.get_node_attributes(self.App.workflowG, 'mem')
for node in cp:
cost_reduction_of_each_mem_config = {}
for mem in self.App.workflowG.nodes[node][
'available_mem']:
if (mem >= mem_backup[node]):
break
self.update_App_workflow_mem_rt(self.App, {node: mem})
self.App.get_simple_dag()
temp_avg_rt = self.App.get_avg_rt()
increased_rt = temp_avg_rt - current_avg_rt
cost_reduction = current_cost - self.App.get_avg_cost()
if (increased_rt < performance_surplus and cost_reduction > 0):
cost_reduction_of_each_mem_config[mem] = (cost_reduction, increased_rt)
self.update_App_workflow_mem_rt(self.App, {node: mem_backup[node]})
if (BCR and BCRtype == 'C/ERT'):
cost_reduction_of_each_mem_config = {item: cost_reduction_of_each_mem_config[item] for item in
cost_reduction_of_each_mem_config.keys() if
cost_reduction_of_each_mem_config[item][0] /
cost_reduction_of_each_mem_config[item][
1] > last_e2ert_cost_BCR * BCRthreshold}
elif (BCR and BCRtype == "MAX"):
cost_reduction_of_each_mem_config = {item: (
cost_reduction_of_each_mem_config[item][0], cost_reduction_of_each_mem_config[item][1],
cost_reduction_of_each_mem_config[item][0] / cost_reduction_of_each_mem_config[item][1]) for
item in
cost_reduction_of_each_mem_config.keys()}
if (len(cost_reduction_of_each_mem_config) != 0):
if (BCR and BCRtype == "MAX"):
max_BCR = np.max([item[2] for item in cost_reduction_of_each_mem_config.values()])
max_cost_reduction_under_MAX_BCR = np.max(
[item[0] for item in cost_reduction_of_each_mem_config.values() if
item[2] == max_BCR])
min_increased_rt_under_MAX_rt_reduction_MAX_BCR = np.min(
[item[1] for item in cost_reduction_of_each_mem_config.values() if
item[0] == max_cost_reduction_under_MAX_BCR and item[2] == max_BCR])
reversed_dict = dict(zip(cost_reduction_of_each_mem_config.values(),
cost_reduction_of_each_mem_config.keys()))
max_cost_reduction_of_each_node[node] = (reversed_dict[(
max_cost_reduction_under_MAX_BCR, min_increased_rt_under_MAX_rt_reduction_MAX_BCR,
max_BCR)],
max_cost_reduction_under_MAX_BCR,
min_increased_rt_under_MAX_rt_reduction_MAX_BCR,
max_BCR)
else:
max_cost_reduction = np.max([item[0] for item in cost_reduction_of_each_mem_config.values()])
min_increased_rt_under_MAX_cost_reduction = np.min(
[item[1] for item in cost_reduction_of_each_mem_config.values() if
item[0] == max_cost_reduction])
reversed_dict = dict(
zip(cost_reduction_of_each_mem_config.values(), cost_reduction_of_each_mem_config.keys()))
max_cost_reduction_of_each_node[node] = (
reversed_dict[(max_cost_reduction, min_increased_rt_under_MAX_cost_reduction)],
max_cost_reduction,
min_increased_rt_under_MAX_cost_reduction)
if (len(max_cost_reduction_of_each_node) == 0):
if (order >= self.simple_paths_num - 1):
break
else:
order += 1
continue
if (BCR and BCRtype == "MAX"):
max_BCR = np.max([item[3] for item in max_cost_reduction_of_each_node.values()])
max_cost_reduction_under_MAX_BCR = np.max(
[item[1] for item in max_cost_reduction_of_each_node.values() if item[3] == max_BCR])
target_node = [key for key in max_cost_reduction_of_each_node if
max_cost_reduction_of_each_node[key][3] == max_BCR and
max_cost_reduction_of_each_node[key][1] == max_cost_reduction_under_MAX_BCR][0]
target_mem = max_cost_reduction_of_each_node[target_node][0]
else:
max_cost_reduction = np.max([item[1] for item in max_cost_reduction_of_each_node.values()])
min_increased_rt_under_MAX_cost_reduction = np.min(
[item[2] for item in max_cost_reduction_of_each_node.values() if item[1] == max_cost_reduction])
target_mem = np.min([item[0] for item in max_cost_reduction_of_each_node.values() if
item[1] == max_cost_reduction and item[
2] == min_increased_rt_under_MAX_cost_reduction])
target_node = [key for key in max_cost_reduction_of_each_node if
max_cost_reduction_of_each_node[key] == (
target_mem, max_cost_reduction, min_increased_rt_under_MAX_cost_reduction)][0]
self.update_App_workflow_mem_rt(self.App, {target_node: target_mem})
max_cost_reduction = max_cost_reduction_of_each_node[target_node][1]
min_increased_rt_under_MAX_cost_reduction = max_cost_reduction_of_each_node[target_node][2]
current_cost = current_cost - max_cost_reduction
performance_surplus = performance_surplus - min_increased_rt_under_MAX_cost_reduction
current_avg_rt = current_avg_rt + min_increased_rt_under_MAX_cost_reduction
current_e2ert_cost_BCR = max_cost_reduction / min_increased_rt_under_MAX_cost_reduction
if (current_e2ert_cost_BCR == float('Inf')):
last_e2ert_cost_BCR = 0
else:
last_e2ert_cost_BCR = current_e2ert_cost_BCR
current_mem_configuration = nx.get_node_attributes(self.App.workflowG, 'mem')
del current_mem_configuration['Start']
del current_mem_configuration['End']
print('Optimized Memory Configuration: {}'.format(current_mem_configuration))
print('Average end-to-end response time: {}'.format(current_avg_rt))
print('Average Cost: {}'.format(current_cost))
print('PRCPG_BCPC Optimization Completed.')
return (current_avg_rt, current_cost, current_mem_configuration, iterations_count)
def get_opt_curve(self, filenameprefix, budget_list, performance_constraint_list, BCRthreshold=0.2):
'''
Get the Optimization Curve and save as csv
        Args:
            filenameprefix (string): prefix of the output csv file names
            budget_list (list): budget values evaluated with PRCPG_BPBC
            performance_constraint_list (list): end-to-end response time constraints evaluated with PRCPG_BCPC
            BCRthreshold (float): the threshold of BCR cut off
'''
BPBC_data = pd.DataFrame()
for budget in budget_list:
aRow = {'Budget': budget, 'BCR_threshold': BCRthreshold}
rt, cost, config, iterations = self.PRCPG_BPBC(budget, BCR=False)
aRow['BCR_disabled_RT'] = rt
aRow['BCR_disabled_Cost'] = cost
aRow['BCR_disabled_Config'] = config
aRow['BCR_disabled_Iterations'] = iterations
rt, cost, config, iterations = self.PRCPG_BPBC(budget, BCR=True, BCRtype='RT/M',
BCRthreshold=BCRthreshold)
aRow['BCR_RT/M_RT'] = rt
aRow['BCR_RT/M_Cost'] = cost
aRow['BCR_RT/M_Config'] = config
aRow['BCR_RT/M_Iterations'] = iterations
rt, cost, config, iterations = self.PRCPG_BPBC(budget, BCR=True, BCRtype='ERT/C',
BCRthreshold=BCRthreshold)
aRow['BCR_ERT/C_RT'] = rt
aRow['BCR_ERT/C_Cost'] = cost
aRow['BCR_ERT/C_Config'] = config
aRow['BCR_ERT/C_Iterations'] = iterations
rt, cost, config, iterations = self.PRCPG_BPBC(budget, BCR=True, BCRtype='MAX')
aRow['BCR_MAX_RT'] = rt
aRow['BCR_MAX_Cost'] = cost
aRow['BCR_MAX_Config'] = config
aRow['BCR_MAX_Iterations'] = iterations
aRow = pd.Series(aRow)
BPBC_data = BPBC_data.append(aRow, ignore_index=True)
BPBC_data = BPBC_data[
['Budget', 'BCR_disabled_RT', 'BCR_RT/M_RT', 'BCR_ERT/C_RT', 'BCR_MAX_RT', 'BCR_disabled_Cost',
'BCR_RT/M_Cost', 'BCR_ERT/C_Cost', 'BCR_MAX_Cost', 'BCR_disabled_Config', 'BCR_RT/M_Config',
'BCR_ERT/C_Config', 'BCR_MAX_Config', 'BCR_disabled_Iterations', 'BCR_RT/M_Iterations',
'BCR_ERT/C_Iterations', 'BCR_MAX_Iterations', 'BCR_threshold']]
BPBC_data.to_csv(filenameprefix + '_BPBC.csv', index=False)
BCPC_data = pd.DataFrame()
for perf_constraint in performance_constraint_list:
aRow = {'Performance_Constraint': perf_constraint, 'BCR_threshold': BCRthreshold}
rt, cost, config, iterations = self.PRCPG_BCPC(rt_constraint=perf_constraint, BCR=False)
aRow['BCR_disabled_RT'] = rt
aRow['BCR_disabled_Cost'] = cost
aRow['BCR_disabled_Config'] = config
aRow['BCR_disabled_Iterations'] = iterations
rt, cost, config, iterations = self.PRCPG_BCPC(rt_constraint=perf_constraint, BCR=True, BCRtype='RT/M',
BCRthreshold=BCRthreshold)
aRow['BCR_M/RT_RT'] = rt
aRow['BCR_M/RT_Cost'] = cost
aRow['BCR_M/RT_Config'] = config
aRow['BCR_M/RT_Iterations'] = iterations
rt, cost, config, iterations = self.PRCPG_BCPC(rt_constraint=perf_constraint, BCR=True,
BCRtype='ERT/C', BCRthreshold=BCRthreshold)
aRow['BCR_C/ERT_RT'] = rt
aRow['BCR_C/ERT_Cost'] = cost
aRow['BCR_C/ERT_Config'] = config
aRow['BCR_C/ERT_Iterations'] = iterations
rt, cost, config, iterations = self.PRCPG_BCPC(rt_constraint=perf_constraint, BCR=True, BCRtype='MAX')
aRow['BCR_MAX_RT'] = rt
aRow['BCR_MAX_Cost'] = cost
aRow['BCR_MAX_Config'] = config
aRow['BCR_MAX_Iterations'] = iterations
aRow = | pd.Series(aRow) | pandas.Series |
# Copyright 2021 Rikai Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from rikai.pytorch.pandas import PandasDataset
from rikai.types.geometry import Box2d
def test_pandas_dataframe():
data = [{"id": i, "value": str(i * 10)} for i in range(10)]
df = | pd.DataFrame(data) | pandas.DataFrame |
"""
# Part of localization phase
# suspected bug detection:
# 1. Tensorflow,Theano,CNTK
# 2. Tensorflow,Theano,MXNET
#
# voting process
# -> a. inconsistency -> error backend,error layer.
# b. check error backend in new container(whether inconsistency disappears).
# """
#
import numpy as np
import os
import sys
import configparser
from scripts.tools.filter_bugs import filter_bugs
import pickle
import pandas as pd
from itertools import combinations,product
from datetime import datetime
np.random.seed(20200501)
def get_model_inputs(idntfrs):
idntfrs_set = set()
for idntfr in idntfrs:
idntfr_splits = idntfr.split("_")
model_input = "{}_{}_{}".format(idntfr_splits[0], idntfr_splits[1], idntfr_splits[-1])
idntfrs_set.add(model_input)
return idntfrs_set
def voted_by_inconsistency(res_dict):
votes = {}
for back_pair,incon_value in res_dict.items():
if incon_value >= threshold:
back1,back2 = back_pair.split("_")
votes[back1] = votes.setdefault(back1, 0) + 1
votes[back2] = votes.setdefault(back2, 0) + 1
for bk, cnt in votes.items():
if cnt == 2:
return bk
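# Illustrative sketch (not part of the original pipeline; the readings below are hypothetical,
# and `threshold` is the module-level value read from the config in __main__):
def _demo_voted_by_inconsistency():
    example = {'tensorflow_cntk': 0.9, 'theano_cntk': 0.8, 'tensorflow_theano': 0.01}
    # both pairs containing 'cntk' exceed the threshold, so 'cntk' collects two votes
    return voted_by_inconsistency(example)  # -> 'cntk' (assuming threshold <= 0.8)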
def get_metrics_of_exp(exp_id,exp_dir):
res_dict = dict()
metrics_dir = os.path.join(exp_dir, "metrics_result")
file_name = f"{exp_id}_D_MAD_result.csv"
with open(os.path.join(metrics_dir, file_name), "r") as fr:
fr.readline()
for line in fr.readlines():
line = line.rstrip("\n").split(",")
res_dict[line[0]] = float(line[1])
return res_dict
def get_metrics_of_model_input(model_input,backend_pairs,exp_metrics_dict):
mis = model_input.split("_")
res_dict = dict()
for backend_pair in backend_pairs:
model_idntfr = f"{mis[0]}_{mis[1]}_{backend_pair}_{mis[-1]}"
res_dict[backend_pair] = exp_metrics_dict[model_idntfr]
return res_dict
def choose_error_layer(localize_res: dict):
def get_common_layer(res, error_layer):
bk_dict = {}
for bk_pair, tu in res.items():
if tu[0] == error_layer:
bk1, bk2 = bk_pair.split("_")[0], bk_pair.split("_")[1]
bk_dict[bk1] = bk_dict.setdefault(bk1, 0) + 1
bk_dict[bk2] = bk_dict.setdefault(bk2, 0) + 1
for bk, cnt in bk_dict.items():
if cnt == 2:
return bk
layers_list = list(localize_res.values())
unique_layer_dict = dict()
for layer_tuple in layers_list:
unique_layer_dict[layer_tuple[0]] = unique_layer_dict.setdefault(layer_tuple[0], 0) + 1
unique_layer_dict = list(unique_layer_dict.items())
if len(unique_layer_dict) == 1:
return unique_layer_dict[0][0], "-".join(backends)
if len(unique_layer_dict) == 2:
error_layer = unique_layer_dict[0][0] if unique_layer_dict[0][1] == 2 else unique_layer_dict[1][0]
return error_layer, get_common_layer(localize_res, error_layer)
if len(unique_layer_dict) == 3:
return None, None
def get_layer_values(bk_res: dict, layer_name):
values = list()
for bk_p in backend_pairs:
df = bk_res[bk_p]
select_row = df[df['current_layer'].isin([layer_name])]
values.append("{}:{}".format(bk_p, select_row['Rl'].values[0]))
return "|".join(values)
def get_rate(value:str):
"""
    value: '|'-separated backend-pair readings, e.g.
        'tensorflow_theano:325317.28125|theano_cntk:0.07708668|cntk_mxnet:0.0887682'
    rate: max_Rl / min_Rl across the backend pairs ('inf' if any reading is infinite)
"""
if 'inf' in value:
return 'inf'
else:
try:
value_splits = value.split("|")
value_list = [abs(float(val.split(":")[1])) for val in value_splits]
except ValueError as e:
print(value)
raise e
max_rl,min_rl = max(value_list),min(value_list)
return max_rl / (min_rl + 1e-10)
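# Illustrative sketch (hypothetical readings): the rate is the largest absolute Rl
# divided by the smallest one, or 'inf' if any backend pair reported an infinite value.
def _demo_get_rate():
    value = 'tensorflow_theano:0.4|theano_cntk:0.1|tensorflow_cntk:0.2'
    return get_rate(value)  # ~4.0, i.e. 0.4 / (0.1 + 1e-10)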
def update_suspected_bugs(res_dict:dict, row:dict):
"""
select suspected bugs from inconsistencies by their rate
rate: max_Rl / min_Rl
row is like: {error_backend:theano,error_layer:conv2d_copy_LA1,
model_input:'alexnet-cifar10_origin0-NAI1-LS6-WS4-NS1-ARep8_input228',
    values:'tensorflow_theano:325317.28125|tensorflow_cntk:325317.28125|tensorflow_mxnet:325317.28125|theano_cntk:0.07708668|theano_mxnet:0.09217975|cntk_mxnet:0.0887682'}
"""
# if not exists;add
# else update
error_bk,layer_name = row['error_backend'],simplify_layer_name(row['error_layer'])
if (error_bk,layer_name) not in res_dict.keys():
res_dict[(error_bk,layer_name)] = set()
res_dict[(error_bk,layer_name)].add(row['model_input'])
# print(row['error_layer'],simplify_layer_name(row['error_layer']))
return res_dict
def simplify_layer_name(layer_name:str):
"""
simplify layer name 'conv2d_copy_LA' -> conv2d
"""
if 'copy' in layer_name:
layer_name = layer_name.split("_copy_")[0]
if 'insert' in layer_name:
layer_name = layer_name.split("_insert_")[0]
    # strip a trailing '_' or a trailing numeric suffix such as '_1' from the name
if "_" in layer_name:
last_chr = layer_name.rfind("_")
if last_chr == len(layer_name) -1 or layer_name[last_chr+1].isdigit():
layer_name = layer_name[:last_chr]
return layer_name
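# Illustrative sketch (hypothetical mutated layer names in the style produced by the
# mutation operators above):
def _demo_simplify_layer_name():
    assert simplify_layer_name('conv2d_copy_LA1') == 'conv2d'
    assert simplify_layer_name('batch_normalization_1') == 'batch_normalization'
    assert simplify_layer_name('dense_2_insert_NAI1') == 'dense'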
def get_inconsistency_value(bk_values):
res_list = []
for bk,values in bk_values.items():
res_list.append(f"{bk}:{values}")
return "|".join(res_list)
def get_largest_error_layer(error_bk,bk_local_res,top_layers):
def get_layer_value_other_bkp(layer_name,layer_stacks):
for idx, row in layer_stacks.iterrows():
if row['current_layer'] == layer_name:
return float(row['Rl'])
layerset = set()
layer_value_dict = dict()
error_bk_pairs = [bkp for bkp in backend_pairs if error_bk in bkp]
# length == 1
other_pair = [bkp for bkp in backend_pairs if error_bk not in bkp]
for bkp in error_bk_pairs:
layerset.add(top_layers[bkp][0])
layer_value_dict[bkp] = (top_layers[bkp][0],get_layer_value_other_bkp(top_layers[bkp][0],bk_local_res[other_pair[0]]))
if len(layerset) == 1:
return list(layerset)[0]
else:
if layer_value_dict[error_bk_pairs[0]][1] < layer_value_dict[error_bk_pairs[1]][1]:
return layer_value_dict[error_bk_pairs[0]][0]
else:
return layer_value_dict[error_bk_pairs[1]][0]
def get_higher_value_count(l):
higher_cnt = 0
for val in l:
if val >= threshold:
higher_cnt += 1
return higher_cnt
def get_another_tuple(idntfr,unique_incon_dict:list):
"""unique_incon_dict is list of ('theano_cntk','O&M',('lenet5-mnist_origin0_theano_cntk_input1',0.35))"""
idntfr_splits = idntfr.split("_")
bkp = f"{idntfr_splits[2]}_{idntfr_splits[3]}"
if idntfr_splits[1] == 'origin0':
# mutated should be added
for iu in unique_incon_dict:
iu_idntfr = iu[2][0]
iu_idntfr_splits = iu_idntfr.split("_")
if iu[0] == bkp and iu[1] =='O&M' and idntfr_splits[0] == iu_idntfr_splits[0] and iu_idntfr_splits[1] != 'origin0' and idntfr_splits[-1] == iu_idntfr_splits[-1]:
return iu[2]
else:
raise Exception(f"Can't find equivalent mutated inconsistency for {idntfr}")
else:
# origin should be added
origin_idntfr = f"{idntfr_splits[0]}_origin0_{idntfr_splits[2]}_{idntfr_splits[3]}_{idntfr_splits[-1]}"
for iu in unique_incon_dict:
if iu[0] == bkp and iu[1] == 'O&M' and origin_idntfr == iu[2][0]:
return iu[2]
else:
print(origin_idntfr)
raise Exception(f"Can't find equivalent origin inconsistency for {idntfr}")
def is_all_original(model_inputs):
for mi in model_inputs:
mi_splits = mi.split("_")
if mi_splits[1] != 'origin0':
return False
else:
return True
def is_all_original_on_exp(model_inputs,exp):
for mi in model_inputs:
mi_splits = mi.split("_")
if mi_splits[0] == exp and mi_splits[1] != 'origin0':
return False
else:
return True
def is_all_mutant(model_inputs):
for mi in model_inputs:
mi_splits = mi.split("_")
if mi_splits[1] == 'origin0':
return False
else:
return True
def is_all_mutant_on_exp(model_inputs,exp):
for mi in model_inputs:
mi_splits = mi.split("_")
if mi_splits[0] == exp and mi_splits[1] == 'origin0':
return False
else:
return True
def is_exp_bug(model_inputs,exp):
for mi in model_inputs:
mi_splits = mi.split("_")
if mi_splits[0] == exp:
return True
else:
return False
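# Note: the is_all_* / is_exp_bug helpers above rely on Python's for/else -- the else
# branch runs only when the loop completes without returning, so an empty collection
# counts as "all original" / "all mutant". Minimal illustration with hypothetical
# model_input identifiers:
def _demo_for_else_helpers():
    assert is_all_original(['lenet5-mnist_origin0_input1'])
    assert not is_all_original(['lenet5-mnist_origin0-NAI1-LS6_input1'])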
if __name__ == '__main__':
start_time = datetime.now()
config_name = sys.argv[1]
lemon_cfg = configparser.ConfigParser()
lemon_cfg.read(f"./config/{config_name}")
parameters = lemon_cfg['parameters']
output_dir = parameters['output_dir']
output_dir = output_dir[:-1] if output_dir.endswith("/") else output_dir
threshold = parameters.getfloat('threshold')
current_container = output_dir.rstrip("/").split("/")[-1]
backend_choices = [1, 2, 3]
print("current_container",current_container)
exps = parameters['exps'].lstrip().rstrip().split(" ")
exps.sort(key=lambda x:x)
global_backend_pairs = [f"{pair[0]}_{pair[1]}" for pair in combinations(['tensorflow', 'theano', 'cntk','mxnet'], 2)]
pd_exps = list()
success_cnt = fail_cnt = 0
fail_model_inputs = list()
reduced_bugs = dict()
columns_cnt = int(3*(len(exps) + 1))
content = np.zeros((6,columns_cnt),dtype='int64')
# create an empty DataFrame
dict_exps = list()
for e in exps:
dict_exps.append(f"{e}+O-M")
dict_exps.append(f"{e}+M-O")
dict_exps.append(f"{e}+O&M")
pd_exps.append(f"{e}+LE")
pd_exps.append(f"{e}+Mu")
pd_exps.append(f"{e}+In")
pd_exps.append(f"Total+LE")
pd_exps.append(f"Total+Mu")
pd_exps.append(f"Total+In")
bug_analysis_keys = list(product(dict_exps, global_backend_pairs))
exp_bkp_tuples = list(product(pd_exps, global_backend_pairs))
bug_analysis = {t:set() for t in bug_analysis_keys}
bug_df = pd.DataFrame(content,columns=pd_exps,index=global_backend_pairs)
model_input_localize = {}
for backend_choice in backend_choices:
if backend_choice == 1:
backends = ['tensorflow', 'theano', 'cntk']
elif backend_choice == 2:
backends = ['tensorflow', 'theano', 'mxnet']
else:
backends = ['tensorflow', 'cntk', 'mxnet']
backend_str = "-".join(backends)
backend_pairs = [f"{pair[0]}_{pair[1]}" for pair in combinations(backends, 2)]
"""Get all exps"""
unsolved_columns = backend_pairs.copy()
unsolved_columns.insert(0,'model_input')
unsolved_df = | pd.DataFrame(columns=unsolved_columns) | pandas.DataFrame |
import numpy as np
from scipy.special import softmax
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as tdata
import pandas as pd
import time
from tqdm import tqdm
from utils import validate, get_logits_targets, sort_sum
import pdb
# Conformalize a model with a calibration set.
# Save it to a file in .cache/modelname
# The only difference is that the forward method of ConformalModel also outputs a set.
class ConformalModel(nn.Module):
def __init__(self, model, calib_loader, alpha, kreg=None, lamda=None, randomized=True, allow_zero_sets=False, pct_paramtune = 0.3, batch_size=32, lamda_criterion='size'):
super(ConformalModel, self).__init__()
self.model = model
self.alpha = alpha
self.T = torch.Tensor([1.3]) #initialize (1.3 is usually a good value)
self.T, calib_logits = platt(self, calib_loader)
self.randomized=randomized
self.allow_zero_sets=allow_zero_sets
self.num_classes = len(calib_loader.dataset.dataset.classes)
if kreg == None or lamda == None:
kreg, lamda, calib_logits = pick_parameters(model, calib_logits, alpha, kreg, lamda, randomized, allow_zero_sets, pct_paramtune, batch_size, lamda_criterion)
self.penalties = np.zeros((1, self.num_classes))
self.penalties[:, kreg:] += lamda
calib_loader = tdata.DataLoader(calib_logits, batch_size = batch_size, shuffle=False, pin_memory=True)
self.Qhat = conformal_calibration_logits(self, calib_loader)
def forward(self, *args, randomized=None, allow_zero_sets=None, **kwargs):
if randomized == None:
randomized = self.randomized
if allow_zero_sets == None:
allow_zero_sets = self.allow_zero_sets
logits = self.model(*args, **kwargs)
with torch.no_grad():
logits_numpy = logits.detach().cpu().numpy()
scores = softmax(logits_numpy/self.T.item(), axis=1)
I, ordered, cumsum = sort_sum(scores)
S = gcq(scores, self.Qhat, I=I, ordered=ordered, cumsum=cumsum, penalties=self.penalties, randomized=randomized, allow_zero_sets=allow_zero_sets)
return logits, S
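# Illustrative usage sketch (model, loaders and the input batch x are placeholders,
# not defined in this file):
#
#     cmodel = ConformalModel(model, calib_loader, alpha=0.1, lamda_criterion='size')
#     cmodel.eval()
#     logits, sets = cmodel(x.cuda())   # sets[i]: array of candidate class indices for sample i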
# Computes the conformal calibration
def conformal_calibration(cmodel, calib_loader):
print("Conformal calibration")
with torch.no_grad():
E = np.array([])
for x, targets in tqdm(calib_loader):
logits = cmodel.model(x.cuda()).detach().cpu().numpy()
scores = softmax(logits/cmodel.T.item(), axis=1)
I, ordered, cumsum = sort_sum(scores)
E = np.concatenate((E,giq(scores,targets,I=I,ordered=ordered,cumsum=cumsum,penalties=cmodel.penalties,randomized=True, allow_zero_sets=True)))
Qhat = np.quantile(E,1-cmodel.alpha,interpolation='higher')
return Qhat
# Temperature scaling
def platt(cmodel, calib_loader, max_iters=10, lr=0.01, epsilon=0.01):
print("Begin Platt scaling.")
# Save logits so don't need to double compute them
logits_dataset = get_logits_targets(cmodel.model, calib_loader)
logits_loader = torch.utils.data.DataLoader(logits_dataset, batch_size = calib_loader.batch_size, shuffle=False, pin_memory=True)
T = platt_logits(cmodel, logits_loader, max_iters=max_iters, lr=lr, epsilon=epsilon)
print(f"Optimal T={T.item()}")
return T, logits_dataset
"""
INTERNAL FUNCTIONS
"""
### Precomputed-logit versions of the above functions.
class ConformalModelLogits(nn.Module):
def __init__(self, model, calib_loader, alpha, kreg=None, lamda=None, randomized=True, allow_zero_sets=False, naive=False, LAC=False, pct_paramtune = 0.3, batch_size=32, lamda_criterion='size'):
super(ConformalModelLogits, self).__init__()
self.model = model
self.alpha = alpha
self.randomized = randomized
self.LAC = LAC
self.allow_zero_sets = allow_zero_sets
self.T = platt_logits(self, calib_loader)
if (kreg == None or lamda == None) and not naive and not LAC:
kreg, lamda, calib_logits = pick_parameters(model, calib_loader.dataset, alpha, kreg, lamda, randomized, allow_zero_sets, pct_paramtune, batch_size, lamda_criterion)
calib_loader = tdata.DataLoader(calib_logits, batch_size=batch_size, shuffle=False, pin_memory=True)
self.penalties = np.zeros((1, calib_loader.dataset[0][0].shape[0]))
if not (kreg == None) and not naive and not LAC:
self.penalties[:, kreg:] += lamda
self.Qhat = 1-alpha
if not naive and not LAC:
self.Qhat = conformal_calibration_logits(self, calib_loader)
elif not naive and LAC:
gt_locs_cal = np.array([np.where(np.argsort(x[0]).flip(dims=(0,)) == x[1])[0][0] for x in calib_loader.dataset])
scores_cal = 1-np.array([np.sort(torch.softmax(calib_loader.dataset[i][0]/self.T.item(), dim=0))[::-1][gt_locs_cal[i]] for i in range(len(calib_loader.dataset))])
self.Qhat = np.quantile( scores_cal , np.ceil((scores_cal.shape[0]+1) * (1-alpha)) / scores_cal.shape[0] )
def forward(self, logits, randomized=None, allow_zero_sets=None):
if randomized == None:
randomized = self.randomized
if allow_zero_sets == None:
allow_zero_sets = self.allow_zero_sets
with torch.no_grad():
logits_numpy = logits.detach().cpu().numpy()
scores = softmax(logits_numpy/self.T.item(), axis=1)
if not self.LAC:
I, ordered, cumsum = sort_sum(scores)
S = gcq(scores, self.Qhat, I=I, ordered=ordered, cumsum=cumsum, penalties=self.penalties, randomized=randomized, allow_zero_sets=allow_zero_sets)
else:
S = [ np.where( (1-scores[i,:]) < self.Qhat )[0] for i in range(scores.shape[0]) ]
return logits, S
def conformal_calibration_logits(cmodel, calib_loader):
with torch.no_grad():
E = np.array([])
for logits, targets in calib_loader:
logits = logits.detach().cpu().numpy()
scores = softmax(logits/cmodel.T.item(), axis=1)
I, ordered, cumsum = sort_sum(scores)
E = np.concatenate((E,giq(scores,targets,I=I,ordered=ordered,cumsum=cumsum,penalties=cmodel.penalties,randomized=True,allow_zero_sets=True)))
Qhat = np.quantile(E,1-cmodel.alpha,interpolation='higher')
return Qhat
def platt_logits(cmodel, calib_loader, max_iters=10, lr=0.01, epsilon=0.01):
nll_criterion = nn.CrossEntropyLoss().cuda()
T = nn.Parameter(torch.Tensor([1.3]).cuda())
optimizer = optim.SGD([T], lr=lr)
for iter in range(max_iters):
T_old = T.item()
for x, targets in calib_loader:
optimizer.zero_grad()
x = x.cuda()
x.requires_grad = True
out = x/T
loss = nll_criterion(out, targets.long().cuda())
loss.backward()
optimizer.step()
if abs(T_old - T.item()) < epsilon:
break
return T
### CORE CONFORMAL INFERENCE FUNCTIONS
# Generalized conditional quantile function.
def gcq(scores, tau, I, ordered, cumsum, penalties, randomized, allow_zero_sets):
penalties_cumsum = np.cumsum(penalties, axis=1)
sizes_base = ((cumsum + penalties_cumsum) <= tau).sum(axis=1) + 1 # 1 - 1001
sizes_base = np.minimum(sizes_base, scores.shape[1]) # 1-1000
if randomized:
V = np.zeros(sizes_base.shape)
for i in range(sizes_base.shape[0]):
V[i] = 1/ordered[i,sizes_base[i]-1] * \
(tau-(cumsum[i,sizes_base[i]-1]-ordered[i,sizes_base[i]-1])-penalties_cumsum[0,sizes_base[i]-1]) # -1 since sizes_base \in {1,...,1000}.
sizes = sizes_base - (np.random.random(V.shape) >= V).astype(int)
else:
sizes = sizes_base
if tau == 1.0:
sizes[:] = cumsum.shape[1] # always predict max size if alpha==0. (Avoids numerical error.)
if not allow_zero_sets:
        sizes[sizes == 0] = 1 # allow the user the option to never have empty sets (will lead to incorrect coverage if 1-alpha < model's top-1 accuracy)
S = list()
# Construct S from equation (5)
for i in range(I.shape[0]):
S = S + [I[i,0:sizes[i]],]
return S
# Get the 'p-value'
def get_tau(score, target, I, ordered, cumsum, penalty, randomized, allow_zero_sets): # For one example
idx = np.where(I==target)
tau_nonrandom = cumsum[idx]
if not randomized:
return tau_nonrandom + penalty[0]
U = np.random.random()
if idx == (0,0):
if not allow_zero_sets:
return tau_nonrandom + penalty[0]
else:
return U * tau_nonrandom + penalty[0]
else:
return U * ordered[idx] + cumsum[(idx[0],idx[1]-1)] + (penalty[0:(idx[1][0]+1)]).sum()
# Gets the histogram of Taus.
def giq(scores, targets, I, ordered, cumsum, penalties, randomized, allow_zero_sets):
"""
Generalized inverse quantile conformity score function.
E from equation (7) in Romano, Sesia, Candes. Find the minimum tau in [0, 1] such that the correct label enters.
"""
E = -np.ones((scores.shape[0],))
for i in range(scores.shape[0]):
E[i] = get_tau(scores[i:i+1,:],targets[i].item(),I[i:i+1,:],ordered[i:i+1,:],cumsum[i:i+1,:],penalties[0,:],randomized=randomized, allow_zero_sets=allow_zero_sets)
return E
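# Informal summary of the pieces above: giq() returns, per calibration example, the
# smallest tau at which the true label would have entered the set; the calibration
# routines take Qhat as the (1 - alpha) quantile of those taus; gcq() then cuts each
# new example's (penalized) cumulative score sequence at that same Qhat to form its set.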
### AUTOMATIC PARAMETER TUNING FUNCTIONS
def pick_kreg(paramtune_logits, alpha):
gt_locs_kstar = np.array([np.where(np.argsort(x[0]).flip(dims=(0,)) == x[1])[0][0] for x in paramtune_logits])
kstar = np.quantile(gt_locs_kstar, 1-alpha, interpolation='higher') + 1
return kstar
def pick_lamda_size(model, paramtune_loader, alpha, kreg, randomized, allow_zero_sets):
# Calculate lamda_star
best_size = iter(paramtune_loader).__next__()[0][1].shape[0] # number of classes
# Use the paramtune data to pick lamda. Does not violate exchangeability.
for temp_lam in [0.001, 0.01, 0.1, 0.2, 0.5]: # predefined grid, change if more precision desired.
conformal_model = ConformalModelLogits(model, paramtune_loader, alpha=alpha, kreg=kreg, lamda=temp_lam, randomized=randomized, allow_zero_sets=allow_zero_sets, naive=False)
top1_avg, top5_avg, cvg_avg, sz_avg = validate(paramtune_loader, conformal_model, print_bool=False)
if sz_avg < best_size:
best_size = sz_avg
lamda_star = temp_lam
return lamda_star
def pick_lamda_adaptiveness(model, paramtune_loader, alpha, kreg, randomized, allow_zero_sets, strata=[[0,1],[2,3],[4,6],[7,10],[11,100],[101,1000]]):
# Calculate lamda_star
lamda_star = 0
best_violation = 1
# Use the paramtune data to pick lamda. Does not violate exchangeability.
for temp_lam in [0, 1e-5, 1e-4, 8e-4, 9e-4, 1e-3, 1.5e-3, 2e-3]: # predefined grid, change if more precision desired.
conformal_model = ConformalModelLogits(model, paramtune_loader, alpha=alpha, kreg=kreg, lamda=temp_lam, randomized=randomized, allow_zero_sets=allow_zero_sets, naive=False)
curr_violation = get_violation(conformal_model, paramtune_loader, strata, alpha)
if curr_violation < best_violation:
best_violation = curr_violation
lamda_star = temp_lam
return lamda_star
def pick_parameters(model, calib_logits, alpha, kreg, lamda, randomized, allow_zero_sets, pct_paramtune, batch_size, lamda_criterion):
num_paramtune = int(np.ceil(pct_paramtune * len(calib_logits)))
paramtune_logits, calib_logits = tdata.random_split(calib_logits, [num_paramtune, len(calib_logits)-num_paramtune])
calib_loader = tdata.DataLoader(calib_logits, batch_size=batch_size, shuffle=False, pin_memory=True)
paramtune_loader = tdata.DataLoader(paramtune_logits, batch_size=batch_size, shuffle=False, pin_memory=True)
if kreg == None:
kreg = pick_kreg(paramtune_logits, alpha)
if lamda == None:
if lamda_criterion == "size":
lamda = pick_lamda_size(model, paramtune_loader, alpha, kreg, randomized, allow_zero_sets)
elif lamda_criterion == "adaptiveness":
lamda = pick_lamda_adaptiveness(model, paramtune_loader, alpha, kreg, randomized, allow_zero_sets)
return kreg, lamda, calib_logits
def get_violation(cmodel, loader_paramtune, strata, alpha):
df = pd.DataFrame(columns=['size', 'correct'])
for logit, target in loader_paramtune:
# compute output
output, S = cmodel(logit) # This is a 'dummy model' which takes logits, for efficiency.
# measure accuracy and record loss
size = np.array([x.size for x in S])
I, _, _ = sort_sum(logit.numpy())
correct = np.zeros_like(size)
for j in range(correct.shape[0]):
correct[j] = int( target[j] in list(S[j]) )
batch_df = | pd.DataFrame({'size': size, 'correct': correct}) | pandas.DataFrame |
import cv2
from collections import OrderedDict
import numpy as np
import itertools
from datetime import datetime
from configparser import ConfigParser, MissingSectionHeaderError, NoOptionError, NoSectionError
import pandas as pd
import os
import warnings
warnings.filterwarnings("ignore")
def shap_summary_calculations(INIFILE, shap_df, classifier_name, BASELINE_VAL, save_path):
dateTime = datetime.now().strftime('%Y%m%d%H%M%S')
config = ConfigParser()
configFile = str(INIFILE)
try:
config.read(configFile)
except MissingSectionHeaderError:
print('ERROR: Not a valid project_config file. Please check the project_config.ini path.')
pose_estimation_body_parts = config.get('create ensemble settings', 'pose_estimation_body_parts')
if (pose_estimation_body_parts == '14') or (pose_estimation_body_parts == '16'):
projectPath = config.get('General settings', 'project_path')
shap_logs_path = os.path.join(projectPath, 'logs', 'shap')
if not os.path.exists(shap_logs_path): os.makedirs(shap_logs_path)
simba_cw = os.getcwd()
simba_feat_cat_dir = os.path.join(simba_cw, 'simba', 'assets', 'shap', 'feature_categories')
feat_cat_csv_path = os.path.join(simba_feat_cat_dir, 'shap_feature_categories.csv')
simba_assets_path = os.path.join(simba_cw, 'simba', 'assets', 'shap')
scale_dict = {'baseline_scale': os.path.join(simba_assets_path, 'baseline_scale.jpg'),
'small_arrow': os.path.join(simba_assets_path, 'down_arrow.jpg'),
'side_scale': os.path.join(simba_assets_path, 'side_scale.jpg'),
'color_bar': os.path.join(simba_assets_path, 'color_bar.jpg')}
category_dict = {'Animal distances': {'icon': os.path.join(simba_assets_path, 'animal_distances.jpg')},
'Intruder movement': {'icon': os.path.join(simba_assets_path, 'intruder_movement.jpg')},
'Resident+intruder movement': {'icon': os.path.join(simba_assets_path, 'resident_intruder_movement.jpg')},
'Resident movement': {'icon': os.path.join(simba_assets_path, 'resident_movement.jpg')},
'Intruder shape': {'icon': os.path.join(simba_assets_path, 'intruder_shape.jpg')},
'Resident intruder shape': {'icon': os.path.join(simba_assets_path, 'resident_intruder_shape.jpg')},
'Resident shape': {'icon': os.path.join(simba_assets_path, 'resident_shape.jpg')}}
pos_boxarrow_colors = [(253,141,60), (252,78,42), (227,26,28), (189,0,38), (128,0,38)]
neg_boxarrow_colors = [(65,182,196), (29,145,192), (34,94,168), (37,52,148), (8,29,88)]
ranges_lists = [list(range(0,20)), list(range(20,40)), list(range(40,60)), list(range(60,80)), list(range(80,101))]
colCats = | pd.read_csv(feat_cat_csv_path, header=[0, 1]) | pandas.read_csv |
from qfengine.data.price.price_source import MySQLPriceDataSource as SQLTable
from qfengine.asset import assetClasses
import pandas as pd
from typing import Union, List, Dict
import os
import numpy as np
import logging
import functools
from qfengine import settings
import concurrent.futures
logger = logging.getLogger(__name__)
# todo: MOVE THESE TO RESPECTIVE DIR MODULE
class DataVendorMySQL(SQLTable):
create_schema = (
'''
CREATE TABLE `%s` (
`id` int NOT NULL AUTO_INCREMENT,
`name` varchar(64) NOT NULL,
`website_url` varchar(255) NULL,
`api_endpoint_url` varchar(255) NULL,
`api_key_id` varchar(255) NULL,
`api_key` varchar(255) NULL,
`created_date` datetime NULL DEFAULT CURRENT_TIMESTAMP(),
`last_updated_date` datetime NULL DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP(),
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8
'''
)
def __init__(
self,
db_credentials:Dict=None,
name:str = 'data_vendor',
**kwargs
):
super().__init__(
(db_credentials if db_credentials is not None
else settings.MYSQL_CREDENTIALS),
name,
(DataVendorMySQL.create_schema %name),
**kwargs
)
from qfengine.data import vendor_api
import pandas as pd
#---| init all availables
_vendorAPIs = {api:getattr(vendor_api,api) for api in vendor_api.__all__}
currentInfo = []
_to_pop = []
for api_name, API in _vendorAPIs.items():
_i = {'name':api_name}
for f in self.all_accepted_columns:
try:
f_dat = getattr(API,f)
except:
pass
else:
if callable(f_dat):
f_dat = f_dat()
_i[f] = f_dat
missing_required = [c for c in self.required_columns if c not in _i]
if len(missing_required) != 0:
_to_pop.append(api_name)
else:
currentInfo.append(_i)
currentInfo = pd.DataFrame.from_dict(currentInfo)
currentInfo = currentInfo.where(pd.notnull(currentInfo),None)
upserted = self.upsertDF(currentInfo,["name"])
for p in _to_pop:
_vendorAPIs.pop(p)
self._APIs = {API.name: API for _,API in _vendorAPIs.items()}
#---| check all init for essential funcs
def get_vendor_API(self,vendor:str):
assert vendor in self.List
return self._APIs[vendor]()
@property
def DF(self):
return self._fullDF().set_index("name").reindex(list(self._APIs.keys()))
@property
def List(self):
return list(self._APIs.keys())
class ExchangeMySQL(SQLTable):
create_schema = (
'''
CREATE TABLE `%s` (
`id` int NOT NULL AUTO_INCREMENT,
`ref_id` varchar(32) NOT NULL,
`name` varchar(255) NOT NULL,
`currency` varchar(64) NULL,
`region` varchar(255) NULL,
`created_date` datetime NULL DEFAULT CURRENT_TIMESTAMP(),
`last_updated_date` datetime NULL DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP(),
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8
'''
)
def __init__(
self,
db_credentials:Dict = None,
vendors:DataVendorMySQL = None,
name:str = 'exchange',
**kwargs
):
super().__init__(
(db_credentials if db_credentials is not None
else settings.MYSQL_CREDENTIALS),
name,
(ExchangeMySQL.create_schema %name),
**kwargs
)
self.vendors = vendors or DataVendorMySQL(db_credentials,mdb_conn = self._conn)
def UPDATE(self, vendor:str = 'IEX', DF:pd.DataFrame=None):
if DF is None:
DF = self.vendors.get_vendor_API(vendor).exchangesDF()
upserted = self.upsertDF(DF,['ref_id'])
for i,_count in upserted.items():
if _count > 0:
logger.warning("%s %s Exchanges" %(str(i).upper(), str(_count)))
@property
def DF(self):
return self._fullDF().set_index("ref_id")
@property
def List(self):
return [e[0] for e in self.executeSQL("select ref_id from %s" %self._name)]
class SecurityMySQL(SQLTable):
create_schema = (
'''
CREATE TABLE `%s` (
`id` int NOT NULL AUTO_INCREMENT,
`exchange_id` int NOT NULL,
`symbol` varchar(10) NOT NULL,
`type` varchar(10) NULL,
`name` varchar(255) NULL,
`sector` varchar(255) NULL,
`industry` varchar(255) NULL,
`currency` varchar(32) NULL,
`region` varchar(32) NULL,
`figi` varchar(255) NULL,
`created_date` datetime NULL DEFAULT CURRENT_TIMESTAMP(),
`last_updated_date` datetime NULL DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP(),
PRIMARY KEY (`id`),
KEY `exchange_id` (`exchange_id` ASC),
KEY `symbol` (`symbol` ASC),
CONSTRAINT `fk_exchange_id`
FOREIGN KEY (`exchange_id`)
REFERENCES `exchange` (`id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8
'''
)
def __init__(
self,
db_credentials:Dict = None,
exchanges:ExchangeMySQL=None,
vendors:DataVendorMySQL=None,
name:str = 'security',
**kwargs
):
super().__init__(
(db_credentials if db_credentials is not None
else settings.MYSQL_CREDENTIALS),
name,
(SecurityMySQL.create_schema %name),
**kwargs
)
self.exchanges = exchanges or ExchangeMySQL(db_credentials, mdb_conn = self._conn)
self.vendors = vendors or self.exchanges.vendors
def UPDATE(self, vendor:str = 'IEX', DF:pd.DataFrame = None,):
if DF is None:
DF = self.vendors.get_vendor_API(vendor).symbolsDF()
if 'exchange_id' not in DF.columns:
potential_exch_cols = [c for c in DF.columns if 'exchange' in c.lower()]
db_exch_id = self.exchanges.DF['id']
for c in potential_exch_cols:
db_exch = db_exch_id.reindex(DF[c].unique())
if len(db_exch.dropna()) == len(db_exch):
db_exch = db_exch.where(pd.notnull(db_exch),None)
DF['exchange_id'] = [db_exch[i] for i in DF[c].values]
break
assert 'exchange_id' in DF.columns, ("unidentified exchange(s) in DF")
DF = DF.drop(columns=potential_exch_cols)
if not isinstance(DF.index, pd.RangeIndex):
DF.reset_index(inplace=True)
assert set(DF.columns).issubset(set(self.all_accepted_columns)), "Unrecognized column(s): %s" %str([c for c in DF.columns if c not in self.all_accepted_columns])
upserted = self.upsertDF(DF,['symbol','exchange_id'])
for i,_count in upserted.items():
if _count > 0:
logger.warning("%s %s Security Symbols" %(str(i).upper(), str(_count)))
@property
def DF(self):
return self._fullDF().set_index("symbol")
@property
def List(self):
return [e[0] for e in self.executeSQL("select symbol from %s" %self._name)]
class DailyPriceMySQL(SQLTable):
create_schema = (
'''
CREATE TABLE `%s` (
`id` int NOT NULL AUTO_INCREMENT,
`data_vendor_id` int NOT NULL,
`symbol_id` int NOT NULL,
`price_date` date NOT NULL,
`open` decimal(19,4) NULL,
`high` decimal(19,4) NULL,
`low` decimal(19,4) NULL,
`close` decimal(19,4) NULL,
`volume` bigint NULL,
`created_date` datetime NULL DEFAULT CURRENT_TIMESTAMP(),
`last_updated_date` datetime NULL DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP(),
PRIMARY KEY (`id`),
KEY `price_date` (`price_date` ASC),
KEY `data_vendor_id` (`data_vendor_id`),
KEY `symbol_id` (`symbol_id`),
CONSTRAINT `fk_symbol_id `
FOREIGN KEY (`symbol_id`)
REFERENCES `security` (`id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_data_vendor_id`
FOREIGN KEY (`data_vendor_id`)
REFERENCES `data_vendor` (`id`)
ON DELETE NO ACTION
ON UPDATE NO ACTION
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8
'''
)
def __init__(
self,
asset_type:assetClasses,
db_credentials:Dict=None,
symbols:SecurityMySQL=None,
vendors:DataVendorMySQL=None,
name:str = 'daily_price',
by_vendor:str = None,
symbols_list:List[str] = None,
**kwargs
):
super().__init__(
db_credentials = (db_credentials if db_credentials is not None
else settings.MYSQL_CREDENTIALS),
price_table_name = name,
price_table_schema = (DailyPriceMySQL.create_schema %name),
**kwargs
)
self.symbols = symbols or SecurityMySQL(db_credentials, mdb_conn = self._conn)
self.vendors = vendors or self.symbols.vendors
self.asset_type = asset_type
self.symbols_list = symbols_list
self.by_vendor = by_vendor
if self.by_vendor:
assert self.by_vendor in self.vendors.List
self.symbols_list = self._query_available_symbols_in_database_by_vendor(self.by_vendor)
if settings.PRINT_EVENTS:
print("Initialized DailyPriceMySQL DataSource From Vendor '%s' | Available Symbols Count = %s" %(str(self.by_vendor), str(len(self.symbols_list))))
self._cached_copies = []
#!----------| ABSTRACTED METHODS OF A PRICE DATA SOURCE |------------#
#---| Self-Copy
def create_price_source_copy(self,
cache_copy:bool = False,
):
copy = DailyPriceMySQL(
asset_type = self.asset_type,
db_credentials = self._db_credentials.copy(),
name = self._full_credentials['table_name'],
by_vendor = None,
)
copy.by_vendor = self.by_vendor #--| skip SQL vetting
copy.symbols_list = (self.symbols_list.copy() if self.symbols_list else self.symbols_list)
if cache_copy:
self._cached_copies.append(copy)
return copy
#---------------------------|
#------| Assets/Universe
def assetsDF(self,
**kwargs
)->pd.DataFrame:
df = self.symbols.DF.astype(str)
if self.symbols_list:
df = df.reindex(self.symbols_list)
if 'sector' in kwargs:
assert isinstance(kwargs['sector'],str)
df = df[df.sector == kwargs['sector']]
return df
def assetsList(self,
**kwargs
)->list:
return list(self.assetsDF(**kwargs).index.values)
@property
def sectorsList(self)->list:
return self.symbols.DF.sector.dropna().unique()
#---------------------------|
#-----------| Price
def get_assets_bid_ask_dfs(self,
asset:str,
*assets:str,
start_dt=None,
end_dt=None,
)->pd.DataFrame:
return self._price_dfs_to_bid_ask_dfs(
self.get_assets_historical_price_dfs(asset,
*assets,
start_dt = start_dt,
end_dt = end_dt
)
)
def get_assets_historical_price_dfs(self,
asset:str,
*assets:str,
price:str = None,
start_dt = None,
end_dt = None,
adjusted = None,
**kwargs
)->pd.DataFrame:
if price:
assert price in [
"open", "high", "low",
"close","volume"
]
#--| parallelizing queries for performance
symbols = [asset] + [s for s in assets]
result = self._assets_daily_price_DF(*symbols)
if price:
result = result[
[col for col in result.columns if price in col]
]
result.columns = result.columns.get_level_values('symbols')
if start_dt:
result = result[result.index >= self._format_dt(start_dt)]
if end_dt:
result = result[result.index <= self._format_dt(end_dt)]
return result
#---------------------------|
#----| Price Date Ranges
@functools.lru_cache(maxsize = 1024 * 1024)
def get_assets_price_date_ranges_df(self,
asset:str,
*assets:str,
)->pd.DataFrame:
symbols = [asset] + [s for s in assets]
if self.symbols_list:
assert set(symbols).issubset(self.symbols_list)
def _get_result(source, symbol, vendor):
return {
'symbol': symbol,
'start_dt': self._format_dt(source._asset_symbol_min_price_date_by_vendor(symbol, vendor)),
'end_dt': self._format_dt(source._asset_symbol_max_price_date_by_vendor(symbol,vendor)),
}
final_df = pd.DataFrame()
for vendor in self.vendorsList:
result = pd.DataFrame.from_dict(
list(
concurrent.futures.ThreadPoolExecutor().map(
_get_result,
*zip(*(
(
self.create_price_source_copy(cache_copy = True),
symbol,
vendor,
) for symbol in symbols
))
)
)
).set_index('symbol').dropna()
self._close_cached_copies()
final_df = final_df.append(result)
symbols = [s for s in symbols if s not in final_df.index]
if len(symbols) == 0:
break
return final_df
def get_assets_minimum_start_dt(self,
asset:str,
*assets:str,
)->pd.Timestamp:
return self._format_dt(max(
self.get_assets_price_date_ranges_df(
asset, *assets
).start_dt.values
))
def get_assets_maximum_end_dt(self,
asset:str,
*assets:str,
)->pd.Timestamp:
return self._format_dt(min(
self.get_assets_price_date_ranges_df(
asset, *assets
).end_dt.values
))
#---------------------------|
#!----------------------------------------------------------------#
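    # Illustrative usage sketch (symbols, vendor name and asset type are placeholders;
    # credentials fall back to settings.MYSQL_CREDENTIALS):
    #
    #     source = DailyPriceMySQL(asset_type=my_asset_type, by_vendor='IEX')
    #     closes = source.get_assets_historical_price_dfs('AAPL', 'MSFT', price='close')
    #     start  = source.get_assets_minimum_start_dt('AAPL', 'MSFT')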
def update_assets_daily_price(self,
vendor:str,
batch_size:int=100,
symbols_to_update:List[str] = None,
skip_empty_update:bool = True,
DF:pd.DataFrame=None,
):
assert vendor in self.vendors.List
vendor_id = self.vendors.DF.id[vendor]
symbols_id = self.symbols.DF['id']
inserted_count = 0
updated_count = 0
t0 = pd.Timestamp.now()
if DF is not None: #---| UPSERT ONCE WITH GIVEN DATAFRAME (NO API CALLS WILL BE MADE)
self._upsert_daily_price_DF(DF,['price_date', 'symbol_id', 'data_vendor_id'])
else: #---| PERFORM UPSERT IN BATCHES OF SYMBOLS BY MAKING API CALLS
if symbols_to_update is not None:
omitted = [s for s in symbols_to_update if s not in symbols_id.index]
if len(omitted) > 0:
logger.warning("Omitting %s given symbols that are not in database universe" %str(len(omitted)))
logger.warning(str(omitted))
symbols_id = symbols_id.reindex([s for s in symbols_to_update if s not in omitted])
symbols_id = dict(symbols_id)
assert len(symbols_id) != 0, "No symbols in Symbol table, code out schematic or manually prepare it (symbol.csv)"
logger.warning("----------------------------------------------------------------")
logger.warning("---------------| Updating Equities Daily Prices |---------------")
logger.warning("----------------------------------------------------------------")
logger.warning("Checking database for latest available price_date from '%s' of %s symbols..." %(vendor,str(len(symbols_id))))
#!---| Query for all symbols to get their max price_dates from SQL Database
symbols_by_max_dates = {}
for s in symbols_id:
d = self._asset_symbol_max_price_date_by_vendor(s,vendor)
if d not in symbols_by_max_dates:
symbols_by_max_dates[d] = []
if s not in symbols_by_max_dates[d]:
symbols_by_max_dates[d].append(s)
#!---| Download latest daily price for symbols with their respective start dates & process for upsert to Database
logger.warning("Performing Update in batches of %s" %str(batch_size))
for start_date,symbols in symbols_by_max_dates.items():
logger.warning("For %s symbols with '%s' as start_date:" %(str(len(symbols)),str(start_date)))
i = 0
batch_number = 1
while True:
symbols_batch = symbols[i:i+batch_size]
batch_data = self.vendors.get_vendor_API(vendor).get_barset(symbols_batch,"1D",start_date)
try:
batch_data = self._transform_DF_for_daily_price_upsert(batch_data, vendor = vendor, start_date=start_date)
except Exception:
logger.warning("Cannot transform, skipping this batch of symbols: %s" %str(symbols_batch))
pass
else:
upserted = self.upsertDF(batch_data,no_filter=True)
for a,_count in upserted.items():
if _count > 0:
if a == 'inserted':
logger.warning(" Batch #%s : %s New Data Points Inserted" %(str(batch_number),_count))
inserted_count += _count
elif a == 'updated':
logger.warning(" Batch #%s : %s Existing Data Points Updated" %(str(batch_number),_count))
updated_count += _count
if (upserted['inserted'] == upserted['updated'] == 0) and skip_empty_update:
logger.warning(" No New Data Upserted. Skipping remaining symbols (set skip_empty_update=False for otherwise)")
break
#---| Loop Breaker
if symbols_batch[-1] == symbols[-1]:
break
else:
i += batch_size
batch_number += 1
print("Update Completed:")
print("--Total Data Points Inserted: %s" %(str(inserted_count)))
print("--Total Data Points Updated: %s" %(str(updated_count)))
print("--Total Time Elapsed: %s" %(str(pd.Timestamp.now() - t0)))
@property
def vendorsDF(self,)->pd.DataFrame:
return self.vendors.DF.reindex(self.vendorsList)
@property
def vendorsList(self,)->list:
return self.vendors.List if self.by_vendor is None else [self.by_vendor]
#!---| BACKEND
def _transform_DF_for_daily_price_upsert(self,
upsert_df:pd.DataFrame,
**kwargs
):
df = upsert_df.copy()
#---| Case I: Basic DF with no MultiIndex columns
if not isinstance(df.columns,pd.MultiIndex):
assert 'symbol_id' in df.columns
if 'price_date' not in df.columns:
assert not isinstance(df.index, pd.RangeIndex)
df.index = pd.DatetimeIndex(df.index)
df['price_date'] = df.index.values
df.index = range(len(df))
if 'data_vendor_id' not in df.columns:
vendor_id = int(self.vendors.DF.id[kwargs['vendor']]) if 'vendor' in kwargs else (
int(kwargs['vendor_id']) if 'vendor_id' in kwargs else None
)
assert isinstance(vendor_id, int)
df['data_vendor_id'] = vendor_id
return df.where(pd.notnull(df), None)
#---| Case II: MultiIndex Columns of (symbols, columns)
else:
assert not isinstance(df.index, pd.RangeIndex)
df.index = pd.DatetimeIndex(df.index)
symbols_id = dict(kwargs['symbols_id']) if 'symbols_id' in kwargs else dict(self.symbols.DF['id'])
vendor_id = int(self.vendors.DF.id[kwargs['vendor']]) if 'vendor' in kwargs else (
int(kwargs['vendor_id']) if 'vendor_id' in kwargs else None
)
assert isinstance(vendor_id, int)
assert isinstance(symbols_id, dict)
try:
df_symbols = list(df.columns.get_level_values('symbols').unique())
except KeyError:
if settings.PRINT_EVENTS:
print("Daily Price columns does not contain 'symbols' as name. "
"Attempting to grab the first index locations..."
)
df_symbols = list(pd.Series([i[0] for i in df.columns]).unique())
assert set(df_symbols).issubset(set(symbols_id.keys())), (
"Daily Price data contains unidentified symbol(s) without id(s): %s" %(
str([
s for s in df_symbols if s not in symbols_id
])
)
)
start_date = pd.Timestamp(kwargs['start_date']) if ('start_date' in kwargs) else None
transformed_df = pd.DataFrame()
for s in df_symbols:
_df = df[s]
_df.index = pd.DatetimeIndex(_df.index)
if start_date:
_df = _df[_df.index > start_date]
_df['symbol_id'] = symbols_id[s]
_df['price_date'] = _df.index.values
_df.index = range(len(_df))
transformed_df = transformed_df.append(_df)
transformed_df['data_vendor_id'] = vendor_id
transformed_df.index = range(len(transformed_df))
transformed_df = transformed_df.where(pd.notnull(transformed_df), None)
return transformed_df
def _upsert_daily_price_DF(self,
DF:pd.DataFrame,
filter_columns:List[str] = None,
**kwargs
):
if settings.PRINT_EVENTS:
print("Upserting Equities Daily Price with given DF.")
upserted = self.upsertDF(self._transform_DF_for_daily_price_upsert(DF, **kwargs), filter_columns)
for a,_count in upserted.items():
if _count > 0:
if a == 'inserted':
logger.warning("%s New Data Points Inserted" %(str(_count)))
elif a == 'updated':
logger.warning("%s Existing Data Points Updated" %(str(_count)))
if (upserted['inserted'] == upserted['updated'] == 0):
logger.warning("No New Data Upserted from DF given.")
@functools.lru_cache(maxsize = 1024 * 1024)
def _assets_daily_price_DF(self,
asset:str,
*assets:str
):
symbols = [asset] + [s for s in assets]
if self.symbols_list:
assert set(symbols).issubset(self.symbols_list)
result = pd.DataFrame()
for vendor in self.vendorsList:
all_dfs = concurrent.futures.ThreadPoolExecutor().map(
self.__class__._query_asset_symbol_daily_price_DF,
*zip(*(
(
self.create_price_source_copy(cache_copy = True),
symbol,
vendor,
) for symbol in symbols
))
)
final_df = pd.concat([
d.where(pd.notna(d), np.nan) for d in all_dfs if (
not d.where(pd.notna(d), np.nan).dropna().empty
)
], axis=1)
self._close_cached_copies()
final_df.columns.names = ('symbols','columns')
final_df = final_df.set_index(final_df.index.tz_localize(settings.TIMEZONE))
result = pd.concat([result,final_df], axis=1)
symbols = [s for s in symbols if s not in result.columns.get_level_values('symbols')]
if len(symbols) == 0:
break
if len(symbols) > 0:
if settings.PRINT_EVENTS:
print("Warning: Queried Daily Prices DataFrame is missing %s symbols:" %len(symbols))
print(symbols)
return result
@functools.lru_cache(maxsize = 1024 * 1024)
def _asset_symbol_max_price_date_by_vendor(self,
asset:str,
vendor:str
):
dat = self.executeSQL(
'''
SELECT max(price_date)
FROM %s dp
INNER JOIN %s sym
INNER JOIN %s vendor
ON
dp.symbol_id = sym.id AND
dp.data_vendor_id = vendor.id
WHERE
sym.symbol = '%s' AND
vendor.name = '%s' AND
dp.close IS NOT NULL AND
dp.open IS NOT NULL
''' %(
self._name,
self.symbols._name,
self.vendors._name,
asset,
vendor
)
)
return None if len(dat) == 0 else dat[0][0]
@functools.lru_cache(maxsize = 1024 * 1024)
def _asset_symbol_min_price_date_by_vendor(self,
asset:str,
vendor:str
):
dat = self.executeSQL(
'''
SELECT min(price_date)
FROM %s dp
INNER JOIN %s sym
INNER JOIN %s vendor
ON
dp.symbol_id = sym.id AND
dp.data_vendor_id = vendor.id
WHERE
sym.symbol = '%s' AND
vendor.name = '%s' AND
dp.close IS NOT NULL AND
dp.open IS NOT NULL
''' %(
self._name,
self.symbols._name,
self.vendors._name,
asset,
vendor
)
)
return None if len(dat) == 0 else dat[0][0]
@functools.lru_cache(maxsize = 1024 * 1024)
def _query_available_symbols_in_database_by_vendor(self, vendor:str):
assert vendor in self.vendors.List
sqlDF = self.assetsDF().reset_index().set_index('id')
sqlDF.index = sqlDF.index.astype(int)
df = sqlDF.reindex(
[
int(i[0]) for i in self.executeSQL(
'select distinct symbol_id from %s where data_vendor_id = %s' %(
self._name,
self.vendors.DF.id[vendor]
)
)
]
)
df = df.reindex(df[['symbol']].dropna().index)
return [] if df.empty else list(df.set_index('symbol').sort_index().index.values)
def _convert_dt_to_date(self, dt):
return pd.Timestamp(
self._format_dt(dt).date(),
tz = settings.TIMEZONE
)
def _format_dt(self, dt):
try:
return pd.Timestamp(dt).tz_convert(settings.TIMEZONE)
except TypeError:
try:
return pd.Timestamp(dt).tz_localize(settings.TIMEZONE)
except:
raise
def _close_cached_copies(self):
for source in self._cached_copies:
source._conn.close()
self._cached_copies = []
def _query_asset_symbol_daily_price_DF(self,
asset:str,
vendor:str,
)->pd.DataFrame:
df_cols = [
"open", "high", "low",
"close","volume"
]
select_str = "dp.price_date, " + str(
[
".".join(["dp",p]) for p in df_cols
]
)[1:-1].replace("'","")
cond_str = "vendor.name = '%s' AND sym.symbol = '%s'" %(vendor, asset)
sql = (
'''
SELECT %s
FROM %s as sym
INNER JOIN %s AS dp
INNER JOIN %s AS vendor
ON
dp.symbol_id = sym.id AND
dp.data_vendor_id = vendor.id
WHERE
%s
ORDER BY
dp.price_date ASC
'''%(
select_str,
self.symbols._name,
self._name,
self.vendors._name,
cond_str
)
)
dat = np.array(self.executeSQL(sql))
cols = [
np.array([asset for _ in df_cols]),
np.array(df_cols)
]
if len(dat) == 0:
return pd.DataFrame(columns = cols)
else:
df = pd.DataFrame(
data = dat[:,1:],
index = pd.DatetimeIndex(dat[:,0]),
columns = cols
)
for c in df.columns:
df[c] = pd.to_numeric(df[c])
return df
def _price_dfs_to_bid_ask_dfs(self,
price_df:pd.DataFrame
):
def _symbol_price_to_bid_ask(bar_df, symbol):
cols = [
np.array([symbol, symbol]),
np.array(['bid', 'ask'])
]
if bar_df.dropna().empty:
return pd.DataFrame(columns = cols)
bar_df = bar_df.sort_index()
oc_df = bar_df.loc[:, ['open', 'close']]
oc_df['pre_market'] = oc_df['close'].shift(1)
oc_df['post_market'] = oc_df['close']
oc_df = oc_df.dropna()
# Convert bars into separate rows for open/close prices
# appropriately timestamped
seq_oc_df = oc_df.T.unstack(level=0).reset_index()
seq_oc_df.columns = ['datetime', 'market', 'price']
seq_oc_df.loc[seq_oc_df['market'] == 'open', 'datetime'] += | pd.Timedelta(hours=9, minutes=30) | pandas.Timedelta |
# cheat sheet https://share.streamlit.io/daniellewisdl/streamlit-cheat-sheet/app.py
# https://docs.streamlit.io/en/stable/index.html
import streamlit as st
import pandas as pd
import numpy as np
import time
# title
st.title('Uber pickups in NYC')
st.write(f" Streamlit version:{st.__version__}")
# get data
DATE_COLUMN = 'date/time'
DATA_URL = 'https://s3-us-west-2.amazonaws.com/streamlit-demo-data/uber-raw-data-sep14.csv.gz'
@st.cache
def load_data(nrows):
data = pd.read_csv(DATA_URL, nrows=nrows)
lowercase = lambda x: str(x).lower()
data.rename(lowercase, axis='columns', inplace=True)
data[DATE_COLUMN] = | pd.to_datetime(data[DATE_COLUMN]) | pandas.to_datetime |
# this program breaks down into a csv where phosphosite orthologs could be lost in the PhosphositeOrthology program
# PhosphositePlus is being used to verify that my orthologs are correct but PSP does not have everything which is the
# reason for using dbPAF
# we want to make sure that the UniprotIDs contained in BOTH PSP and dbPAF end up in the orthologs list (ID_in_dbPAF)
# possible reasons why the candidates may not make it through are:
# dbPAF-OMA conversion table does not contain uniprotID -- see about checking both rev & unrev ID (ID_in_dbPAF_OMA_conversion_table)
# **** issues with having both reviewed and unreviewed ids in conversion table
# OMA program does not actually identify ortholog that PSP does -- in this case no fix (in_OMA_orthologs)
# or my alignment does not work well to identify phos orthologs (in_griffin_phos_orthologs)
import pandas as pd
from sqlalchemy import create_engine
import os
# Don't use excel file, interprets genes as dates
dbPAF_df = pd.read_table('../TOTAL.elm', dtype=object)
psp_df = pd.read_table('Phosphorylation_site_dataset.tab', dtype=object)
psp_df = psp_df.loc[psp_df['ORGANISM'].isin(['human', 'mouse', 'rat', 'fruit fly'])] # filter out animals not in this list
comparison_df = psp_df[['SITE_GRP_ID', 'ORGANISM', 'ACC_ID', 'PROTEIN', 'MOD_RSD']].copy() # copy only necessary columns
comparison_df.rename(columns={'SITE_GRP_ID': 'PSP_SITE_GRP_ID', 'ACC_ID': 'Uniprot_ACC_ID'}, inplace=True) # give cols more specific names
comparison_df['Position'] = comparison_df.MOD_RSD.str[1:-2] # 'S23-p' --> '23' # Position is also str in dbPAF_df
comparison_df['Type'] = comparison_df.MOD_RSD.str[0] # 'S23-p' --> 'S'
# check if the UniprotIDs in PhosphositePlus are also in dbPAF
comparison_df['ID_in_dbPAF'] = comparison_df['Uniprot_ACC_ID'].isin(dbPAF_df['Uniprot'])
# check if UniprotID, site, and amino acid type from PSP are also in dbPAF
comparison_df['ID_and_site_in_dbPAF'] = comparison_df['Uniprot_ACC_ID'].isin(dbPAF_df['Uniprot']) \
& comparison_df['Position'].isin(dbPAF_df['Position']) \
& comparison_df['Type'].isin(dbPAF_df['Type'])
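# Note: the mask above is column-wise (ID, position and residue type are each looked up
# independently in dbPAF), so it can mark rows where the three values never co-occur in a
# single dbPAF record. A stricter row-wise check, shown only as a sketch, could be:
#     strict = comparison_df.merge(
#         dbPAF_df[['Uniprot', 'Position', 'Type']].drop_duplicates(),
#         left_on=['Uniprot_ACC_ID', 'Position', 'Type'],
#         right_on=['Uniprot', 'Position', 'Type'],
#         how='left', indicator=True)['_merge'].eq('both')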
# check if the UniprotIDs from PSP are in the dbPAF to OMA conversion table
oma_uniprot_df = | pd.read_table('oma-uniprot_clean.txt', dtype=object) | pandas.read_table |
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import argparse
from pathlib import Path
parser = argparse.ArgumentParser(description="Analyse the logs produced by torchbeast")
parser.add_argument("--dir", type=str, default="~/logs/torchbeast", help="Directory for log files.")
parser.add_argument("--mode", type=str, default="table", choices=["table", "plot", "joint_plot", "group_plot"])
parser.add_argument("--idx", "--index", nargs="+", required=True)
parser.add_argument("--repeats", default=3, type=int)
parser.add_argument("--steps", default=float('inf'), type=float)
parser.add_argument("--baseline", default=0.0, type=float)
parser.add_argument("--labels",nargs="+")
parser.add_argument("--name", type=str, default="")
def joint_plot(indexes, labels, dir, steps, name):
plt.figure()
dfs = []
for index, label in zip(indexes, labels):
df = pd.read_csv(os.path.join(dir, index, "logs.csv"), index_col="# _tick")
df = df[df["step"] < steps]
df["label"] = label
df["smoothed return"] = df["mean_episode_return"].ewm(span=200).mean()
dfs.append(df)
data = | pd.concat(dfs) | pandas.concat |
import torch
import pandas as pd
import numpy as np
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import transformers as ppb
from transformers import BertForSequenceClassification, AdamW, BertConfig
from torch.utils.data import TensorDataset,DataLoader, RandomSampler, SequentialSampler
from keras.utils import to_categorical
import time
import datetime
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('agg')
import seaborn as sns
def format_time(elapsed):
elapsed_rounded = int(round((elapsed)))
return str(datetime.timedelta(seconds=elapsed_rounded))
def get_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
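# Minimal illustration (synthetic logits): get_accuracy takes raw class scores of shape
# (batch, num_labels) plus integer labels and returns the matched fraction.
def _demo_get_accuracy():
    preds = np.array([[0.1, 0.9], [0.8, 0.2]])
    labels = np.array([1, 1])
    return get_accuracy(preds, labels)  # 0.5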
def tokenize_data(tokenizer, sentences, max_len):
# Tokenize all of the sentences and map the tokens to thier word IDs.
input_ids = []
attention_masks = []
# For every sentence...
for sent in sentences:
# `encode_plus` will:
# (1) Tokenize the sentence.
# (2) Prepend the `[CLS]` token to the start.
# (3) Append the `[SEP]` token to the end.
# (4) Map tokens to their IDs.
# (5) Pad or truncate the sentence to `max_length`
# (6) Create attention masks for [PAD] tokens.
encoded_dict = tokenizer.encode_plus(
sent, # Sentence to encode.
add_special_tokens = True, # Add '[CLS]' and '[SEP]'
max_length = max_len, # Pad & truncate all sentences.
pad_to_max_length = True,
return_attention_mask = True, # Construct attn. masks.
return_tensors = 'pt', # Return pytorch tensors.
)
# Add the encoded sentence to the list.
input_ids.append(encoded_dict['input_ids'])
# And its attention mask (simply differentiates padding from non-padding).
attention_masks.append(encoded_dict['attention_mask'])
# Convert the lists into tensors.
return torch.cat(input_ids, dim=0), torch.cat(attention_masks, dim=0)
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
# If not...
else:
print('No GPU available, using the CPU instead.')
device = torch.device("cpu")
# data
train = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
validation = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv')
test = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/test.csv')
sub =pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/sample_submission.csv')
# uncomment train2.drop
train.drop(train.columns.difference(['id','comment_text','toxic']),axis=1,inplace=True)
#train2.drop(train2.columns.difference(['id','comment_text','toxic']),axis=1,inplace=True)
# uncomment train2['toxic'] =
# Uncomment train = train.append
print(set(list(train.toxic.values)))
#train2['toxic'] = train2['toxic'].apply(lambda x: 1 if x>0.5 else 0)
#print(set(list(train2.toxic.values)))
print(train.shape)
#train = train.append(train2, ignore_index=True)
print(train.shape)
train_labels=train.toxic.values
dev_labels= validation.toxic.values
#test_labels = test.toxic.values
# For DistilBERT:
tokenizer_class, pretrained_weights = (ppb.DistilBertTokenizer, 'distilbert-base-multilingual-cased')
# Load pretrained model/tokenizer
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
#model = model_class.from_pretrained(pretrained_weights)
max_len = 256
#for sent in train.comment_text:
# max_len = max(max_len, len(tokenizer.encode(sent, add_special_tokens=True)))
print('Max sentence length: ', max_len)
# Prepare data
print("Preparing training and development datasets")
batch_size = 32
train_input_ids, train_attention_masks = tokenize_data(tokenizer, train.comment_text, max_len)
train_labels = torch.tensor(train_labels)
train_dataset = TensorDataset(train_input_ids, train_attention_masks, train_labels)
train_dataloader = DataLoader(
train_dataset, # The training samples.
sampler = RandomSampler(train_dataset), # Select batches randomly
batch_size = batch_size # Trains with this batch size.
)
dev_input_ids, dev_attention_masks = tokenize_data(tokenizer, validation.comment_text, max_len)
dev_labels = torch.tensor(dev_labels)
dev_dataset = TensorDataset(dev_input_ids, dev_attention_masks, dev_labels)
dev_dataloader = DataLoader(
dev_dataset, # The validation samples.
sampler = SequentialSampler(dev_dataset), # Pull out batches sequentially.
batch_size = batch_size # Evaluate with this batch size.
)
#model
print("Load Pretrained model")
model = BertForSequenceClassification.from_pretrained(
pretrained_weights, # Use the 12-layer BERT model, with an uncased vocab.
num_labels = 2, # The number of output labels--2 for binary classification.
# You can increase this for multi-class tasks.
output_attentions = False, # Whether the model returns attentions weights.
output_hidden_states = False, # Whether the model returns all hidden-states.
)
model.to(device)
optimizer = AdamW(model.parameters(),lr = 2e-5, eps = 1e-8)
epochs = 5
total_steps = len(train_dataloader) * epochs
# learning rate scheduler
scheduler = ppb.get_linear_schedule_with_warmup(optimizer,
num_warmup_steps = 0,num_training_steps = total_steps)
training_stats = []
total_t0 = time.time()
min_val_loss = 100000 # just a very big value
print("Start training")
for epoch_i in range(0, epochs):
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
print('Training...')
t0 = time.time()
total_train_loss = 0
model.train()
for step, batch in enumerate(train_dataloader):
if step % 40 == 0 and not step == 0:
elapsed = format_time(time.time() - t0)
print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
model.zero_grad()
loss, logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask,
labels=b_labels)
total_train_loss += loss.item()
loss.backward()
# Clip the norm of the gradients to 1.0.
# This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
avg_train_loss = total_train_loss / len(train_dataloader)
training_time = format_time(time.time() - t0)
print("")
print(" Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epcoh took: {:}".format(training_time))
print("")
print("Running Validation...")
t0 = time.time()
model.eval()
total_eval_accuracy = 0
total_eval_loss = 0
nb_eval_steps = 0
for batch in dev_dataloader:
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
with torch.no_grad():
(loss, logits) = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
total_eval_loss += loss.item()
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
total_eval_accuracy +=get_accuracy(logits, label_ids)
avg_val_accuracy = total_eval_accuracy / len(dev_dataloader)
print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
avg_val_loss = total_eval_loss / len(dev_dataloader)
validation_time = format_time(time.time() - t0)
if avg_val_loss < min_val_loss:
print("Saving the model, validatin loss imporoved from", min_val_loss, "to", avg_val_loss)
min_val_loss = avg_val_loss
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(".")
tokenizer.save_pretrained(".")
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
print(" Validation took: {:}".format(validation_time))
training_stats.append(
{
'epoch': epoch_i + 1,
'Training Loss': avg_train_loss,
'Valid. Loss': avg_val_loss,
'Valid. Accur.': avg_val_accuracy,
'Training Time': training_time,
'Validation Time': validation_time
}
)
print("")
print("Training complete!")
print("Total training took {:} (h:mm:ss)".format(format_time(time.time()-total_t0)))
pd.set_option('precision', 2)
df_stats = | pd.DataFrame(data=training_stats) | pandas.DataFrame |
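# --- Editor's note: the sketch below is an illustrative addition, not part of the
# --- original script. Assuming the training_stats list built in the loop above, the
# --- per-epoch summary reads more easily when indexed by epoch:
# df_stats = pd.DataFrame(data=training_stats).set_index('epoch')
# print(df_stats[['Training Loss', 'Valid. Loss', 'Valid. Accur.']])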
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from sklearn.pipeline import Pipeline
from hcrystalball.feature_extraction import HolidayTransformer
@pytest.mark.parametrize(
"X_y_with_freq, country_code, country_code_column, country_code_column_value, extected_error",
[
("series_with_freq_D", "DE", None, None, None),
("series_with_freq_D", None, "holiday_col", "DE", None),
("series_with_freq_M", "DE", None, None, ValueError), # not daily freq
("series_with_freq_Q", "DE", None, None, ValueError), # not daily freq
("series_with_freq_Y", "DE", None, None, ValueError), # not daily freq
(
"series_with_freq_D",
None,
"holiday_colsssss",
"DE",
KeyError,
), # there needs to be holiday_col in X
(
"series_with_freq_D",
None,
None,
None,
ValueError,
), # needs to have country_code or country_code_column
(
"series_with_freq_D",
"LALA",
"LALA",
None,
ValueError,
), # cannot have country_code and country_code_column in the same time
(
"series_with_freq_D",
"LALA",
None,
None,
ValueError,
), # country_code needs to be proper country
(
"series_with_freq_D",
None,
"holiday_col",
"Lala",
ValueError,
), # country_code needs to be proper country
],
indirect=["X_y_with_freq"],
)
def test_holiday_transformer_inputs(
X_y_with_freq,
country_code,
country_code_column,
country_code_column_value,
    expected_error,
):
X, _ = X_y_with_freq
    if expected_error is not None:
        with pytest.raises(expected_error):
holiday_transformer = HolidayTransformer(
country_code=country_code, country_code_column=country_code_column
)
if country_code_column:
X["holiday_col"] = country_code_column_value
holiday_transformer.fit_transform(X)
else:
holiday_transformer = HolidayTransformer(
country_code=country_code, country_code_column=country_code_column
)
if country_code_column:
X[country_code_column] = country_code_column_value
holiday_transformer.fit_transform(X)
if country_code_column:
assert holiday_transformer.get_params()["country_code"] is None
@pytest.mark.parametrize(
"country_code, country_code_column, country_code_column_value, exp_col_name",
[
("CZ", None, None, "_holiday_CZ"),
(None, "holiday_col", "CZ", "_holiday_holiday_col"),
],
)
def test_holiday_transformer_transform(
country_code, country_code_column, country_code_column_value, exp_col_name
):
expected = {exp_col_name: ["Labour Day", "", "", "", "", "", "", "Liberation Day", "", ""]}
X = pd.DataFrame(index=pd.date_range(start="2019-05-01", periods=10))
df_expected = pd.DataFrame(expected, index=X.index)
if country_code_column:
X[country_code_column] = country_code_column_value
df_result = HolidayTransformer(
country_code=country_code, country_code_column=country_code_column
).fit_transform(X)
assert_frame_equal(df_result, df_expected)
@pytest.mark.parametrize(
"country_code_first, country_code_column_first, country_code_column_first_value, "
"country_code_second, country_code_column_second, country_code_column_second_value",
[
("CZ", None, None, "SK", None, None),
(None, "czech", "CZ", None, "slovak", "SK"),
("CZ", None, None, None, "slovak", "SK"),
(None, "czech", "CZ", "SK", None, None),
],
)
def test_two_transformers(
country_code_first,
country_code_column_first,
country_code_column_first_value,
country_code_second,
country_code_column_second,
country_code_column_second_value,
):
first_suffix = country_code_first or country_code_column_first
second_suffix = country_code_second or country_code_column_second
expected = {
f"_holiday_{first_suffix}": [
"Labour Day",
"",
"",
"",
"",
"",
"",
"Liberation Day",
"",
"",
],
f"_holiday_{second_suffix}": [
"Labour Day",
"",
"",
"",
"",
"",
"",
"Liberation Day",
"",
"",
],
}
X = pd.DataFrame(index=pd.date_range(start="2019-05-01", periods=10))
df_expected = | pd.DataFrame(expected, index=X.index) | pandas.DataFrame |
"""Helper Functions to Support Pairs Trading
This file can be imported as a module and contains the following functions:
* create_and_save_historicals - returns a df with all coin information
* binance_data_to_df - historical information for a single coin
* two_coin_pricing - historical log pricing of two coins and their difference in log pricing
* single_stationarity_test - stationarity test for a pd series
* pair_stationarity_test - stationarity test for the difference in log pricing between two coins
* potential_pairs - list of all coin pairs that are stationary and have sufficient trade volume
* ranked_crossing - ranked list of coin pairs based on how often they mean reverted
* ranked_volatility - ranked list of coin pairs based on volatility
* composite_ranking - ranked list of coin pairs combining ranked_crossing and ranked_volatility
"""
import pandas as pd
import numpy as np
import math
import os
import os.path
from datetime import datetime, timedelta, datetime
from dateutil import parser
from statsmodels.tsa.stattools import coint, adfuller
from itertools import combinations
import binance
from binance.client import Client
from typing import Union, Optional
BINANCE_CLIENT = Client(os.getenv('BINANCE_KEY'), os.getenv('BINANCE_SECRET_KEY'))
BINSIZES = {"1m": 1, "5m": 5, "1h": 60, "1d": 1440}
# batch_size = 750
# BINANCE_SYMBOLS = ['YOYOBTC', 'SNGLSBTC', 'FUNBTC', 'SNMBTC', 'XVGBTC', 'SNTBTC', 'MTHBTC', 'VIBBTC', 'TRXBTC', 'XRPBTC', 'AMBBTC', 'CDTBTC', 'QSPBTC', 'BTSBTC', 'ADABTC', 'XLMBTC', 'CNDBTC', 'TNBBTC', 'GTOBTC', 'OSTBTC', 'VIBEBTC', 'IOSTBTC', 'ZILBTC', 'XEMBTC', 'WPRBTC', 'LOOMBTC', 'IOTXBTC', 'QKCBTC', 'DATABTC', 'SCBTC', 'VETBTC', 'DOCKBTC', 'GOBTC', 'RVNBTC', 'MITHBTC', 'FETBTC', 'CELRBTC', 'MATICBTC', 'PHBBTC', 'TFUELBTC', 'ONEBTC', 'FTMBTC', 'DOGEBTC', 'ANKRBTC', 'COSBTC', 'PERLBTC', 'CHZBTC', 'HBARBTC', 'NKNBTC', 'ARPABTC', 'TROYBTC', 'VITEBTC', 'DREPBTC', 'TCTBTC', 'COTIBTC', 'STPTBTC', 'CHRBTC', 'MDTBTC', 'STMXBTC', 'DGBBTC', 'JSTBTC', 'RSRBTC', 'NBSBTC', 'AKROBTC', 'FORBTC', 'ROSEBTC', 'REEFBTC', 'CKBBTC', 'LINABTC', 'TLMBTC']
# BINANCE_SYMBOLS = [x['symbol'] for x in BINANCE_CLIENT.get_all_tickers() if x['symbol'][-3:]=="BTC"]
BINANCE_SYMBOLS = ['1INCHBTC', 'AAVEBTC', 'ADABTC', 'ALGOBTC', 'ALICEBTC', 'ALPHABTC', 'AMBBTC', 'ANKRBTC', 'ASTBTC', 'ATOMBTC', 'AVAXBTC', 'BATBTC', 'BCHBTC', 'BLZBTC', 'BNBBTC', 'BQXBTC', 'CELRBTC', 'CHRBTC', 'CHZBTC', 'COTIBTC', 'DEGOBTC', 'DIABTC', 'DOGEBTC', 'DOTBTC', 'DREPBTC', 'DUSKBTC', 'ENJBTC', 'EOSBTC', 'ETHBTC', 'FETBTC', 'FILBTC', 'FTMBTC', 'HBARBTC', 'IOSTBTC', 'JSTBTC', 'KAVABTC', 'KNCBTC', 'LINKBTC', 'LRCBTC', 'LTCBTC', 'LUNABTC', 'MANABTC', 'MATICBTC', 'MDTBTC', 'MITHBTC', 'NEOBTC', 'OGNBTC', 'ONEBTC', 'ONTBTC', 'REEFBTC', 'ROSEBTC', 'RVNBTC', 'SANDBTC', 'SCBTC', 'SOLBTC', 'STMXBTC', 'SUSHIBTC', 'SXPBTC', 'TFUELBTC', 'THETABTC', 'TROYBTC', 'TRXBTC', 'TVKBTC', 'UNIBTC', 'VETBTC', 'WBTCBTC', 'XEMBTC', 'XLMBTC', 'XMRBTC', 'XRPBTC', 'XTZBTC', 'XVGBTC', 'ZILBTC']
def create_and_save_historicals(kline_size: str, start_date: Optional[str] = '1 Feb 2020', end_date: Optional[str] = None, save_compiled: Optional[bool] = False, save_individual: Optional[bool] = False) -> pd.DataFrame:
"""Pools historical information of all coins in BINANCE_SYMBOLS using the Binance API
Parameters
----------
kline_size : str
How often the data should be pulled. Options are: 1m, 5m, 1h, 1d
start_date : str, optional
The first day of data collection. Format: '%d %b %Y' (defaults is '1 Feb 2021')
end_date : str, optional
Last day of data collections. Format: '%d %b %Y' (defaults is today's date)
save_compiled : bool, optional
An option to save the full dataframe in a CSV file named 'full_data.csv' (default is False)
save_individual : bool, optional
An option to save the individual coins' data in CSV files (default is False)
Returns
-------
pd.DataFrame
DataFrame with the historical information and statistics of the coins listed in BINANCE_SYMBOLS
"""
df = pd.DataFrame()
for symbol in BINANCE_SYMBOLS:
try:
df1 = binance_data_to_df(symbol=symbol, kline_size=kline_size, start_date=start_date, end_date=end_date, save=save_individual)
df1['coin'] = symbol
df = df.append(df1, True)
except:
pass
# convert all except the following columns to numeric type
cols = [i for i in df.columns if i not in ["coin", "close_time"]]
for col in cols:
df[col] = pd.to_numeric(df[col])
df['log_close'] = np.log10(df['close'])
df['%change'] = (df['close'] - df['open']) / df['open']
if save_compiled:
df.to_csv('data/full_data.csv', index=None)
return df
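# Editor's usage sketch (an addition, not part of the original module). The kline size
# and date range below are illustrative values only:
#   hist = create_and_save_historicals('1h', start_date='1 Feb 2020',
#                                      end_date='1 Mar 2021', save_compiled=True)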
def binance_data_to_df(symbol: str, kline_size: str, start_date: str, end_date: str, save: bool) -> pd.DataFrame:
"""Helper function for create_and_save_historicals. It queries the Binance API to grab historical information on one coin
Parameters
----------
symbol : str
The coin's symbol as listed in Binance
kline_size : str
How often the data should be pulled. Options are: '1m', '5m', '1h', '1d'
start_date : str
The first day of data collection. Format: '%d %b %Y'
end_date : str
Last day of data collections. Format: '%d %b %Y'
save : bool
An option to save the coin's data in a CSV file
Returns
-------
pd.DataFrame
DataFrame with the historical information of a coin
"""
filename = '%s-%s-data.csv' % (symbol, kline_size)
# get the start date as oldest_point and newest_point as the last datapoint available
oldest_point = datetime.strptime(start_date, '%d %b %Y')
if end_date is None:
newest_point = pd.to_datetime(BINANCE_CLIENT.get_klines(symbol=symbol, interval=kline_size)[-1][0], unit='ms')
else:
newest_point = datetime.strptime(end_date, '%d %b %Y')
# calculate number of minutes between start and end point
delta_min = (newest_point - oldest_point).total_seconds() / 60
#create a bucket for each time segment by dividing total minutes by the corresponding binsize (no. of min in each bucket)
available_data = math.ceil(delta_min / BINSIZES[kline_size])
if oldest_point == datetime.strptime(start_date, '%d %b %Y'):
print('Downloading all available %s data for %s. Be patient..!' % (kline_size, symbol))
else:
print('Downloading %d minutes of new data available for %s, i.e. %d instances of %s data.' % (delta_min, symbol, available_data, kline_size))
klines = BINANCE_CLIENT.get_historical_klines(symbol, kline_size, oldest_point.strftime("%d %b %Y %H:%M:%S"), newest_point.strftime("%d %b %Y %H:%M:%S"))
data = | pd.DataFrame(klines, columns = ['timestamp', 'open', 'high', 'low', 'close', 'volume', 'close_time', 'quote_av', 'trades', 'tb_base_av', 'tb_quote_av', 'ignore' ]) | pandas.DataFrame |
import numpy as np
import numpy.testing as npt
import pandas as pd
from stumpy import aampi, core, config
import pytest
import naive
substitution_locations = [(slice(0, 0), 0, -1, slice(1, 3), [0, 3])]
substitution_values = [np.nan, np.inf]
def test_aampi_int_input():
with pytest.raises(TypeError):
aampi(np.arange(10), 5)
def test_aampi_self_join():
m = 3
zone = int(np.ceil(m / 4))
seed = np.random.randint(100000)
np.random.seed(seed)
n = 30
T = np.random.rand(n)
stream = aampi(T, m, egress=False)
for i in range(34):
t = np.random.rand()
stream.update(t)
right_P = stream.P_
right_I = stream.I_
right_left_P = stream.left_P_
right_left_I = stream.left_I_
left = naive.aamp(stream.T_, m)
left_P = left[:, 0]
left_I = left[:, 1]
left_left_P = np.full(left_P.shape, np.inf)
left_left_I = left[:, 2]
for i, j in enumerate(left_left_I):
if j >= 0:
D = core.mass_absolute(stream.T_[i : i + m], stream.T_[j : j + m])
left_left_P[i] = D[0]
naive.replace_inf(left_P)
naive.replace_inf(left_left_P)
naive.replace_inf(right_P)
naive.replace_inf(right_left_P)
npt.assert_almost_equal(left_P, right_P)
npt.assert_almost_equal(left_I, right_I)
npt.assert_almost_equal(left_left_P, right_left_P)
npt.assert_almost_equal(left_left_I, right_left_I)
np.random.seed(seed)
n = 30
T = np.random.rand(n)
T = pd.Series(T)
stream = aampi(T, m, egress=False)
for i in range(34):
t = np.random.rand()
stream.update(t)
right_P = stream.P_
right_I = stream.I_
right_left_P = stream.left_P_
right_left_I = stream.left_I_
naive.replace_inf(right_P)
naive.replace_inf(right_left_P)
npt.assert_almost_equal(left_P, right_P)
npt.assert_almost_equal(left_I, right_I)
npt.assert_almost_equal(left_left_P, right_left_P)
npt.assert_almost_equal(left_left_I, right_left_I)
def test_aampi_self_join_egress():
m = 3
zone = int(np.ceil(m / 4))
seed = np.random.randint(100000)
np.random.seed(seed)
n = 30
T = np.random.rand(n)
left = naive.aampi_egress(T, m)
left_P = left.P_.copy()
left_I = left.I_
left_left_P = left.left_P_.copy()
left_left_I = left.left_I_
stream = aampi(T, m, egress=True)
right_P = stream.P_.copy()
right_I = stream.I_
right_left_P = stream.left_P_.copy()
right_left_I = stream.left_I_
naive.replace_inf(left_P)
naive.replace_inf(left_left_P)
naive.replace_inf(right_P)
naive.replace_inf(right_left_P)
npt.assert_almost_equal(left_P, right_P)
npt.assert_almost_equal(left_I, right_I)
npt.assert_almost_equal(left_left_P, right_left_P)
npt.assert_almost_equal(left_left_I, right_left_I)
for i in range(34):
t = np.random.rand()
left.update(t)
stream.update(t)
right_P = stream.P_.copy()
right_I = stream.I_
right_left_P = stream.left_P_.copy()
right_left_I = stream.left_I_
left_P = left.P_.copy()
left_I = left.I_
left_left_P = left.left_P_.copy()
left_left_I = left.left_I_
naive.replace_inf(left_P)
naive.replace_inf(left_left_P)
naive.replace_inf(right_P)
naive.replace_inf(right_left_P)
npt.assert_almost_equal(left_P, right_P)
npt.assert_almost_equal(left_I, right_I)
npt.assert_almost_equal(left_left_P, right_left_P)
npt.assert_almost_equal(left_left_I, right_left_I)
np.random.seed(seed)
T = np.random.rand(n)
T = pd.Series(T)
left = naive.aampi_egress(T, m)
left_P = left.P_.copy()
left_I = left.I_
stream = aampi(T, m, egress=True)
right_P = stream.P_.copy()
right_I = stream.I_
naive.replace_inf(left_P)
naive.replace_inf(right_P)
npt.assert_almost_equal(left_P, right_P)
npt.assert_almost_equal(left_I, right_I)
for i in range(34):
t = np.random.rand()
left.update(t)
stream.update(t)
right_P = stream.P_.copy()
right_I = stream.I_
right_left_P = stream.left_P_.copy()
right_left_I = stream.left_I_
left_P = left.P_.copy()
left_I = left.I_
left_left_P = left.left_P_.copy()
left_left_I = left.left_I_
naive.replace_inf(left_P)
naive.replace_inf(left_left_P)
naive.replace_inf(right_P)
naive.replace_inf(right_left_P)
npt.assert_almost_equal(left_P, right_P)
npt.assert_almost_equal(left_I, right_I)
npt.assert_almost_equal(left_left_P, right_left_P)
npt.assert_almost_equal(left_left_I, right_left_I)
@pytest.mark.parametrize("substitute", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_aampi_init_nan_inf_self_join(substitute, substitution_locations):
m = 3
zone = int(np.ceil(m / 4))
seed = np.random.randint(100000)
# seed = 58638
for substitution_location in substitution_locations:
np.random.seed(seed)
n = 30
T = np.random.rand(n)
if substitution_location == -1:
substitution_location = T.shape[0] - 1
T[substitution_location] = substitute
stream = aampi(T, m, egress=False)
for i in range(34):
t = np.random.rand()
stream.update(t)
right_P = stream.P_
right_I = stream.I_
stream.T_[substitution_location] = substitute
left = naive.aamp(stream.T_, m)
left_P = left[:, 0]
left_I = left[:, 1]
naive.replace_inf(left_P)
naive.replace_inf(right_P)
npt.assert_almost_equal(left_P, right_P)
npt.assert_almost_equal(left_I, right_I)
np.random.seed(seed)
n = 30
T = np.random.rand(n)
if substitution_location == -1:
substitution_location = T.shape[0] - 1
T[substitution_location] = substitute
T = pd.Series(T)
stream = aampi(T, m, egress=False)
for i in range(34):
t = np.random.rand()
stream.update(t)
right_P = stream.P_
right_I = stream.I_
naive.replace_inf(right_P)
npt.assert_almost_equal(left_P, right_P)
npt.assert_almost_equal(left_I, right_I)
@pytest.mark.parametrize("substitute", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_aampi_init_nan_inf_self_join_egress(substitute, substitution_locations):
m = 3
zone = int(np.ceil(m / 4))
seed = np.random.randint(100000)
# seed = 58638
for substitution_location in substitution_locations:
np.random.seed(seed)
n = 30
T = np.random.rand(n)
if substitution_location == -1:
substitution_location = T.shape[0] - 1
T[substitution_location] = substitute
left = naive.aampi_egress(T, m)
left_P = left.P_.copy()
left_I = left.I_
left_left_P = left.left_P_.copy()
left_left_I = left.left_I_
stream = aampi(T, m, egress=True)
right_P = stream.P_.copy()
right_I = stream.I_
right_left_P = stream.left_P_.copy()
right_left_I = stream.left_I_
naive.replace_inf(left_P)
naive.replace_inf(right_P)
naive.replace_inf(left_left_P)
naive.replace_inf(right_left_P)
npt.assert_almost_equal(left_P, right_P)
npt.assert_almost_equal(left_I, right_I)
npt.assert_almost_equal(left_left_P, right_left_P)
npt.assert_almost_equal(left_left_I, right_left_I)
for i in range(34):
t = np.random.rand()
left.update(t)
stream.update(t)
right_P = stream.P_.copy()
right_I = stream.I_
right_left_P = stream.left_P_.copy()
right_left_I = stream.left_I_
left_P = left.P_.copy()
left_I = left.I_
left_left_P = left.left_P_.copy()
left_left_I = left.left_I_
naive.replace_inf(left_P)
naive.replace_inf(left_left_P)
naive.replace_inf(right_P)
naive.replace_inf(right_left_P)
npt.assert_almost_equal(left_P, right_P)
npt.assert_almost_equal(left_I, right_I)
npt.assert_almost_equal(left_left_P, right_left_P)
npt.assert_almost_equal(left_left_I, right_left_I)
np.random.seed(seed)
n = 30
T = np.random.rand(n)
T = | pd.Series(T) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# # Starbucks Capstone Challenge
#
# ### Introduction
#
# This data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks.
#
# Not all users receive the same offer, and that is the challenge to solve with this data set.
#
# Your task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products.
#
# Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement.
#
# You'll be given transactional data showing user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer.
#
# Keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer.
#
# ### Example
#
# To give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer.
#
# However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer.
#
# ### Cleaning
#
# This makes data cleaning especially important and tricky.
#
# You'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. You'll want to try to assess what a certain demographic group will buy when not receiving any offers.
#
# ### Final Advice
#
# Because this is a capstone project, you are free to analyze the data any way you see fit. For example, you could build a machine learning model that predicts how much someone will spend based on demographics and offer type. Or you could build a model that predicts whether or not someone will respond to an offer. Or, you don't need to build a machine learning model at all. You could develop a set of heuristics that determine what offer you should send to each customer (i.e., 75 percent of women customers who were 35 years old responded to offer A vs 40 percent from the same demographic to offer B, so send offer A).
# # Data Sets
#
# The data is contained in three files:
#
# * portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.)
# * profile.json - demographic data for each customer
# * transcript.json - records for transactions, offers received, offers viewed, and offers completed
#
# Here is the schema and explanation of each variable in the files:
#
# **portfolio.json**
# * id (string) - offer id
# * offer_type (string) - type of offer ie BOGO, discount, informational
# * difficulty (int) - minimum required spend to complete an offer
# * reward (int) - reward given for completing an offer
# * duration (int) - time for offer to be open, in days
# * channels (list of strings)
#
# **profile.json**
# * age (int) - age of the customer
# * became_member_on (int) - date when customer created an app account
# * gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)
# * id (str) - customer id
# * income (float) - customer's income
#
# **transcript.json**
# * event (str) - record description (ie transaction, offer received, offer viewed, etc.)
# * person (str) - customer id
# * time (int) - time in hours since start of test. The data begins at time t=0
# * value - (dict of strings) - either an offer id or transaction amount depending on the record
#
# **Note:** If you are using the workspace, you will need to go to the terminal and run the command `conda update pandas` before reading in the files. This is because the version of pandas in the workspace cannot read in the transcript.json file correctly, but the newest version of pandas can. You can access the terminal from the orange icon in the top left of this notebook.
#
# You can see how to access the terminal and how the install works using the two images below. First you need to access the terminal:
#
# <img src="pic1.png"/>
#
# Then you will want to run the above command:
#
# <img src="pic2.png"/>
#
# Finally, when you enter back into the notebook (use the jupyter icon again), you should be able to run the below cell without any errors.
# In[1]:
import pandas as pd
import numpy as np
import math
import json
get_ipython().run_line_magic('matplotlib', 'inline')
# read in the json files
portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True)
profile = | pd.read_json('data/profile.json', orient='records', lines=True) | pandas.read_json |
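# Editor's sketch, not part of the original notebook: a first look at the transcript
# events. The 'data/transcript.json' path follows the pattern of the two reads above,
# and the event labels named in the schema ('offer received', 'offer viewed',
# 'offer completed', 'transaction') are assumptions until checked against the data.
transcript = pd.read_json('data/transcript.json', orient='records', lines=True)
print(transcript['event'].value_counts())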
import inspect
import os
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.model_understanding.graphs import visualize_decision_tree
from evalml.pipelines.components import ComponentBase
from evalml.utils.gen_utils import (
SEED_BOUNDS,
_convert_to_woodwork_structure,
_convert_woodwork_types_wrapper,
_rename_column_names_to_numeric,
classproperty,
convert_to_seconds,
drop_rows_with_nans,
get_importable_subclasses,
get_random_seed,
import_or_raise,
infer_feature_types,
jupyter_check,
pad_with_nans,
save_plot
)
@patch('importlib.import_module')
def test_import_or_raise_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml")
with pytest.raises(ImportError, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message")
with pytest.raises(Exception, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib")
def test_import_or_raise_imports():
math = import_or_raise("math", "error message")
assert math.ceil(0.1) == 1
def test_convert_to_seconds():
assert convert_to_seconds("10 s") == 10
assert convert_to_seconds("10 sec") == 10
assert convert_to_seconds("10 second") == 10
assert convert_to_seconds("10 seconds") == 10
assert convert_to_seconds("10 m") == 600
assert convert_to_seconds("10 min") == 600
assert convert_to_seconds("10 minute") == 600
assert convert_to_seconds("10 minutes") == 600
assert convert_to_seconds("10 h") == 36000
assert convert_to_seconds("10 hr") == 36000
assert convert_to_seconds("10 hour") == 36000
assert convert_to_seconds("10 hours") == 36000
with pytest.raises(AssertionError, match="Invalid unit."):
convert_to_seconds("10 years")
def test_get_random_seed_rng():
def make_mock_random_state(return_value):
class MockRandomState(np.random.RandomState):
def __init__(self):
self.min_bound = None
self.max_bound = None
super().__init__()
def randint(self, min_bound, max_bound):
self.min_bound = min_bound
self.max_bound = max_bound
return return_value
return MockRandomState()
rng = make_mock_random_state(42)
assert get_random_seed(rng) == 42
assert rng.min_bound == SEED_BOUNDS.min_bound
assert rng.max_bound == SEED_BOUNDS.max_bound
def test_get_random_seed_int():
# ensure the invariant "min_bound < max_bound" is enforced
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=0)
with pytest.raises(ValueError):
get_random_seed(0, min_bound=0, max_bound=-1)
# test default boundaries to show the provided value should modulate within the default range
assert get_random_seed(SEED_BOUNDS.max_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.max_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.max_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.max_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.max_bound + 2) == SEED_BOUNDS.min_bound + 2
assert get_random_seed(SEED_BOUNDS.min_bound - 2) == SEED_BOUNDS.max_bound - 2
assert get_random_seed(SEED_BOUNDS.min_bound - 1) == SEED_BOUNDS.max_bound - 1
assert get_random_seed(SEED_BOUNDS.min_bound) == SEED_BOUNDS.min_bound
assert get_random_seed(SEED_BOUNDS.min_bound + 1) == SEED_BOUNDS.min_bound + 1
assert get_random_seed(SEED_BOUNDS.min_bound + 2) == SEED_BOUNDS.min_bound + 2
# vectorize get_random_seed via a wrapper for easy evaluation
default_min_bound = inspect.signature(get_random_seed).parameters['min_bound'].default
default_max_bound = inspect.signature(get_random_seed).parameters['max_bound'].default
assert default_min_bound == SEED_BOUNDS.min_bound
assert default_max_bound == SEED_BOUNDS.max_bound
def get_random_seed_vec(min_bound=None, max_bound=None): # passing None for either means no value is provided to get_random_seed
def get_random_seed_wrapper(random_seed):
return get_random_seed(random_seed,
min_bound=min_bound if min_bound is not None else default_min_bound,
max_bound=max_bound if max_bound is not None else default_max_bound)
return np.vectorize(get_random_seed_wrapper)
# ensure that regardless of the setting of min_bound and max_bound, the output of get_random_seed always stays
# between the min_bound (inclusive) and max_bound (exclusive), and wraps neatly around that range using modular arithmetic.
vals = np.arange(-100, 100)
def make_expected_values(vals, min_bound, max_bound):
return np.array([i if (min_bound <= i and i < max_bound) else ((i - min_bound) % (max_bound - min_bound)) + min_bound
for i in vals])
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=None)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=None, max_bound=10)(vals),
make_expected_values(vals, min_bound=SEED_BOUNDS.min_bound, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=None)(vals),
make_expected_values(vals, min_bound=-10, max_bound=SEED_BOUNDS.max_bound))
np.testing.assert_equal(get_random_seed_vec(min_bound=0, max_bound=5)(vals),
make_expected_values(vals, min_bound=0, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=0)(vals),
make_expected_values(vals, min_bound=-5, max_bound=0))
np.testing.assert_equal(get_random_seed_vec(min_bound=-5, max_bound=5)(vals),
make_expected_values(vals, min_bound=-5, max_bound=5))
np.testing.assert_equal(get_random_seed_vec(min_bound=5, max_bound=10)(vals),
make_expected_values(vals, min_bound=5, max_bound=10))
np.testing.assert_equal(get_random_seed_vec(min_bound=-10, max_bound=-5)(vals),
make_expected_values(vals, min_bound=-10, max_bound=-5))
def test_class_property():
class MockClass:
name = "MockClass"
@classproperty
def caps_name(cls):
return cls.name.upper()
assert MockClass.caps_name == "MOCKCLASS"
def test_get_importable_subclasses_wont_get_custom_classes():
class ChildClass(ComponentBase):
pass
assert ChildClass not in get_importable_subclasses(ComponentBase)
@patch('importlib.import_module')
def test_import_or_warn_errors(dummy_importlib):
def _mock_import_function(library_str):
if library_str == "_evalml":
raise ImportError("Mock ImportError executed!")
if library_str == "attr_error_lib":
raise Exception("Mock Exception executed!")
dummy_importlib.side_effect = _mock_import_function
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'"):
import_or_raise("_evalml", warning=True)
with pytest.warns(UserWarning, match="Missing optional dependency '_evalml'. Please use pip to install _evalml. Additional error message"):
import_or_raise("_evalml", "Additional error message", warning=True)
with pytest.warns(UserWarning, match="An exception occurred while trying to import `attr_error_lib`: Mock Exception executed!"):
import_or_raise("attr_error_lib", warning=True)
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check_errors(mock_import_or_raise):
mock_import_or_raise.side_effect = ImportError
assert not jupyter_check()
mock_import_or_raise.side_effect = Exception
assert not jupyter_check()
@patch('evalml.utils.gen_utils.import_or_raise')
def test_jupyter_check(mock_import_or_raise):
mock_import_or_raise.return_value = MagicMock()
mock_import_or_raise().core.getipython.get_ipython.return_value = True
assert jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = False
assert not jupyter_check()
mock_import_or_raise().core.getipython.get_ipython.return_value = None
assert not jupyter_check()
def _check_equality(data, expected, check_index_type=True):
if isinstance(data, pd.Series):
pd.testing.assert_series_equal(data, expected, check_index_type)
else:
pd.testing.assert_frame_equal(data, expected, check_index_type)
@pytest.mark.parametrize("data,num_to_pad,expected",
[(pd.Series([1, 2, 3]), 1, pd.Series([np.nan, 1, 2, 3])),
(pd.Series([1, 2, 3]), 0, pd.Series([1, 2, 3])),
(pd.Series([1, 2, 3, 4], index=pd.date_range("2020-10-01", "2020-10-04")),
2, pd.Series([np.nan, np.nan, 1, 2, 3, 4])),
(pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]}), 0,
pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]})),
(pd.DataFrame({"a": [4, 5, 6], "b": ["a", "b", "c"]}), 1,
pd.DataFrame({"a": [np.nan, 4, 5, 6], "b": [np.nan, "a", "b", "c"]})),
(pd.DataFrame({"a": [1, 0, 1]}), 2,
pd.DataFrame({"a": [np.nan, np.nan, 1, 0, 1]}))])
def test_pad_with_nans(data, num_to_pad, expected):
padded = pad_with_nans(data, num_to_pad)
_check_equality(padded, expected)
def test_pad_with_nans_with_series_name():
name = "data to pad"
data = pd.Series([1, 2, 3], name=name)
padded = pad_with_nans(data, 1)
_check_equality(padded, pd.Series([np.nan, 1, 2, 3], name=name))
@pytest.mark.parametrize("data, expected",
[([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [1., 2., 3, None]})],
[pd.Series([1., 2.], index=pd.Int64Index([1, 2])),
pd.DataFrame({"a": [2., 3.]}, index=pd.Int64Index([1, 2]))]),
([pd.Series([None, 1., 2., 3]), pd.DataFrame({"a": [3., 4., None, None]})],
[pd.Series([1.], index=pd.Int64Index([1])),
pd.DataFrame({"a": [4.]}, index=pd.Int64Index([1]))]),
([pd.DataFrame(), pd.Series([None, 1., 2., 3.])],
[pd.DataFrame(), pd.Series([1., 2., 3.], index=pd.Int64Index([1, 2, 3]))]),
([pd.DataFrame({"a": [1., 2., None]}), pd.Series([])],
[pd.DataFrame({"a": [1., 2.]}), pd.Series([])])
])
def test_drop_nan(data, expected):
no_nan_1, no_nan_2 = drop_rows_with_nans(*data)
_check_equality(no_nan_1, expected[0], check_index_type=False)
_check_equality(no_nan_2, expected[1], check_index_type=False)
def test_rename_column_names_to_numeric():
X = np.array([[1, 2], [3, 4]])
pd.testing.assert_frame_equal(_rename_column_names_to_numeric(X), pd.DataFrame(X))
X = pd.DataFrame({"<>": [1, 2], ">>": [2, 4]})
pd.testing.assert_frame_equal(_rename_column_names_to_numeric(X), pd.DataFrame({0: [1, 2], 1: [2, 4]}))
X = ww.DataTable(pd.DataFrame({"<>": [1, 2], ">>": [2, 4]}), logical_types={"<>": "categorical", ">>": "categorical"})
X_renamed = _rename_column_names_to_numeric(X)
X_expected = pd.DataFrame({0: pd.Series([1, 2], dtype="category"), 1: pd.Series([2, 4], dtype="category")})
pd.testing.assert_frame_equal(X_renamed.to_dataframe(), X_expected)
assert X_renamed.logical_types == {0: ww.logical_types.Categorical, 1: ww.logical_types.Categorical}
def test_convert_woodwork_types_wrapper_with_nan():
y = _convert_woodwork_types_wrapper(pd.Series([1, 2, None], dtype="Int64"))
pd.testing.assert_series_equal(y, pd.Series([1, 2, np.nan], dtype="float64"))
y = _convert_woodwork_types_wrapper(pd.array([1, 2, None], dtype="Int64"))
pd.testing.assert_series_equal(y, pd.Series([1, 2, np.nan], dtype="float64"))
y = _convert_woodwork_types_wrapper(pd.Series(["a", "b", None], dtype="string"))
pd.testing.assert_series_equal(y, pd.Series(["a", "b", np.nan], dtype="object"))
y = _convert_woodwork_types_wrapper(pd.array(["a", "b", None], dtype="string"))
pd.testing.assert_series_equal(y, pd.Series(["a", "b", np.nan], dtype="object"))
y = _convert_woodwork_types_wrapper(pd.Series([True, False, None], dtype="boolean"))
pd.testing.assert_series_equal(y, pd.Series([True, False, np.nan]))
y = _convert_woodwork_types_wrapper(pd.array([True, False, None], dtype="boolean"))
pd.testing.assert_series_equal(y, pd.Series([True, False, np.nan]))
def test_convert_woodwork_types_wrapper():
y = _convert_woodwork_types_wrapper(pd.Series([1, 2, 3], dtype="Int64"))
pd.testing.assert_series_equal(y, pd.Series([1, 2, 3], dtype="int64"))
y = _convert_woodwork_types_wrapper(pd.array([1, 2, 3], dtype="Int64"))
pd.testing.assert_series_equal(y, pd.Series([1, 2, 3], dtype="int64"))
y = _convert_woodwork_types_wrapper(pd.Series(["a", "b", "a"], dtype="string"))
pd.testing.assert_series_equal(y, pd.Series(["a", "b", "a"], dtype="object"))
y = _convert_woodwork_types_wrapper(pd.array(["a", "b", "a"], dtype="string"))
pd.testing.assert_series_equal(y, pd.Series(["a", "b", "a"], dtype="object"))
y = _convert_woodwork_types_wrapper(pd.Series([True, False, True], dtype="boolean"))
pd.testing.assert_series_equal(y, pd.Series([True, False, True], dtype="bool"))
y = _convert_woodwork_types_wrapper(pd.array([True, False, True], dtype="boolean"))
pd.testing.assert_series_equal(y, pd.Series([True, False, True], dtype="bool"))
def test_convert_woodwork_types_wrapper_series_name():
name = "my series name"
series_with_name = pd.Series([1, 2, 3], name=name)
y = _convert_woodwork_types_wrapper(series_with_name)
assert y.name == name
def test_convert_woodwork_types_wrapper_dataframe():
X = pd.DataFrame({"Int series": pd.Series([1, 2, 3], dtype="Int64"),
"Int array": pd.array([1, 2, 3], dtype="Int64"),
"Int series with nan": pd.Series([1, 2, None], dtype="Int64"),
"Int array with nan": pd.array([1, 2, None], dtype="Int64"),
"string series": pd.Series(["a", "b", "a"], dtype="string"),
"string array": pd.array(["a", "b", "a"], dtype="string"),
"string series with nan": pd.Series(["a", "b", None], dtype="string"),
"string array with nan": pd.array(["a", "b", None], dtype="string"),
"boolean series": pd.Series([True, False, True], dtype="boolean"),
"boolean array": pd.array([True, False, True], dtype="boolean"),
"boolean series with nan": pd.Series([True, False, None], dtype="boolean"),
"boolean array with nan": pd.array([True, False, None], dtype="boolean")
})
X_expected = pd.DataFrame({"Int series": pd.Series([1, 2, 3], dtype="int64"),
"Int array": pd.array([1, 2, 3], dtype="int64"),
"Int series with nan": pd.Series([1, 2, np.nan], dtype="float64"),
"Int array with nan": pd.array([1, 2, np.nan], dtype="float64"),
"string series": pd.Series(["a", "b", "a"], dtype="object"),
"string array": pd.array(["a", "b", "a"], dtype="object"),
"string series with nan": pd.Series(["a", "b", np.nan], dtype="object"),
"string array with nan": pd.array(["a", "b", np.nan], dtype="object"),
"boolean series": pd.Series([True, False, True], dtype="bool"),
"boolean array": pd.array([True, False, True], dtype="bool"),
"boolean series with nan": pd.Series([True, False, np.nan], dtype="object"),
"boolean array with nan": pd.array([True, False, np.nan], dtype="object")
})
pd.testing.assert_frame_equal(X_expected, _convert_woodwork_types_wrapper(X))
def test_convert_to_woodwork_structure():
X_dt = ww.DataTable(pd.DataFrame([[1, 2], [3, 4]]))
pd.testing.assert_frame_equal(X_dt.to_dataframe(), _convert_to_woodwork_structure(X_dt).to_dataframe())
X_dc = ww.DataColumn(pd.Series([1, 2, 3, 4]))
pd.testing.assert_series_equal(X_dc.to_series(), _convert_to_woodwork_structure(X_dc).to_series())
X_pd = pd.DataFrame({0: pd.Series([1, 2], dtype="Int64"),
1: pd.Series([3, 4], dtype="Int64")})
pd.testing.assert_frame_equal(X_pd, _convert_to_woodwork_structure(X_pd).to_dataframe())
X_pd = pd.Series([1, 2, 3, 4], dtype="Int64")
pd.testing.assert_series_equal(X_pd, _convert_to_woodwork_structure(X_pd).to_series())
X_list = [1, 2, 3, 4]
X_expected = ww.DataColumn(pd.Series(X_list))
pd.testing.assert_series_equal(X_expected.to_series(), _convert_to_woodwork_structure(X_list).to_series())
assert X_list == [1, 2, 3, 4]
X_np = np.array([1, 2, 3, 4])
X_expected = ww.DataColumn(pd.Series(X_np))
pd.testing.assert_series_equal(X_expected.to_series(), _convert_to_woodwork_structure(X_np).to_series())
assert np.array_equal(X_np, np.array([1, 2, 3, 4]))
X_np = np.array([[1, 2], [3, 4]])
X_expected = ww.DataTable(pd.DataFrame(X_np))
pd.testing.assert_frame_equal(X_expected.to_dataframe(), _convert_to_woodwork_structure(X_np).to_dataframe())
assert np.array_equal(X_np, np.array([[1, 2], [3, 4]]))
def test_convert_to_woodwork_structure_series_name():
name = "column with name"
X_pd = pd.Series([1, 2, 3, 4], dtype="Int64", name=name)
X_dc = _convert_to_woodwork_structure(X_pd)
assert X_dc.name == name
pd.testing.assert_series_equal(X_pd, X_dc.to_series())
def test_infer_feature_types_dataframe():
X_pd = pd.DataFrame({0: pd.Series([1, 2]),
1: pd.Series([3, 4])})
pd.testing.assert_frame_equal(X_pd, infer_feature_types(X_pd).to_dataframe(), check_dtype=False)
X_pd = pd.DataFrame({0: pd.Series([1, 2], dtype="Int64"),
1: pd.Series([3, 4], dtype="Int64")})
pd.testing.assert_frame_equal(X_pd, infer_feature_types(X_pd).to_dataframe())
X_expected = X_pd.copy()
X_expected[0] = X_expected[0].astype("category")
pd.testing.assert_frame_equal(X_expected, infer_feature_types(X_pd, {0: "categorical"}).to_dataframe())
pd.testing.assert_frame_equal(X_expected, infer_feature_types(X_pd, {0: ww.logical_types.Categorical}).to_dataframe())
def test_infer_feature_types_series():
X_pd = pd.Series([1, 2, 3, 4])
X_expected = X_pd.astype("Int64")
pd.testing.assert_series_equal(X_expected, infer_feature_types(X_pd).to_series())
X_pd = pd.Series([1, 2, 3, 4], dtype="Int64")
pd.testing.assert_series_equal(X_pd, infer_feature_types(X_pd).to_series())
X_pd = | pd.Series([1, 2, 3, 4], dtype="Int64") | pandas.Series |
"""
Functions to scrape by season, games, and date range
"""
import hockey_scraper.json_schedule as json_schedule
import hockey_scraper.game_scraper as game_scraper
import hockey_scraper.shared as shared
import pandas as pd
import time
import random
# This hold the scraping errors in a string format.
# This may seem pointless but I have a personal reason for it (I think...)
errors = ''
def print_errors():
"""
Print errors with scraping.
Also puts errors in the "error" string (would just print the string but it would look like shit on one line. I
could store it as I "should" print it but that isn't how I want it).
:return: None
"""
global errors
if game_scraper.broken_pbp_games:
print('\nBroken pbp:')
errors += 'Broken pbp:'
for x in game_scraper.broken_pbp_games:
print(x[0], x[1])
errors = ' '.join([errors, str(x[0]), ","])
if game_scraper.broken_shifts_games:
print('\nBroken shifts:')
errors += 'Broken shifts:'
for x in game_scraper.broken_shifts_games:
print(x[0], x[1])
errors = ' '.join([errors, str(x[0]), ","])
if game_scraper.players_missing_ids:
print("\nPlayers missing ID's:")
errors += "Players missing ID's:"
for x in game_scraper.players_missing_ids:
print(x[0], x[1])
errors = ' '.join([errors, str(x[0]), ","])
if game_scraper.missing_coords:
print('\nGames missing coordinates:')
errors += 'Games missing coordinates:'
for x in game_scraper.missing_coords:
print(x[0], x[1])
errors = ' '.join([errors, str(x[0]), ","])
print('\n')
# Clear them all out for the next call
game_scraper.broken_shifts_games = []
game_scraper.broken_pbp_games = []
game_scraper.players_missing_ids = []
game_scraper.missing_coords = []
def check_data_format(data_format):
"""
Checks if data_format specified (if it is at all) is either None, 'Csv', or 'pandas'.
It exits program with error message if input isn't good.
:param data_format: data_format provided
:return: Boolean - True if good
"""
if not data_format or data_format.lower() not in ['csv', 'pandas']:
raise shared.HaltException('{} is an unspecified data format. The two options are Csv and Pandas '
'(Csv is default)\n'.format(data_format))
def check_valid_dates(from_date, to_date):
"""
Check if it's a valid date range
:param from_date: date should scrape from
:param to_date: date should scrape to
:return: None
"""
try:
if time.strptime(to_date, "%Y-%m-%d") < time.strptime(from_date, "%Y-%m-%d"):
raise shared.HaltException("Error: The second date input is earlier than the first one")
except ValueError:
raise shared.HaltException("Error: Incorrect format given for dates. They must be given like 'yyyy-mm-dd' "
"(ex: '2016-10-01').")
def to_csv(file_name, pbp_df, shifts_df):
"""
Write DataFrame(s) to csv file(s)
:param file_name: name of file
:param pbp_df: pbp DataFrame
:param shifts_df: shifts DataFrame
:return: None
"""
if pbp_df is not None:
print("\nPbp data deposited in file - " + 'nhl_pbp{}.csv'.format(file_name))
pbp_df.to_csv('nhl_pbp{}.csv'.format(file_name), sep=',', encoding='utf-8',index=False)
if shifts_df is not None:
print("Shift data deposited in file - " + 'nhl_shifts{}.csv'.format(file_name))
shifts_df.to_csv('nhl_shifts{}.csv'.format(file_name), sep=',', encoding='utf-8',index=False)
def scrape_list_of_games(games, if_scrape_shifts):
"""
Given a list of game_id's (and a date for each game) it scrapes them
    :param games: list of dicts, each with 'game_id' and 'date' keys
:param if_scrape_shifts: Boolean indicating whether to also scrape shifts
:return: DataFrame of pbp info, also shifts if specified
"""
pbp_dfs = []
shifts_dfs = []
for game in games:
pbp_df, shifts_df = game_scraper.scrape_game(str(game["game_id"]), game["date"], if_scrape_shifts)
if pbp_df is not None:
pbp_dfs.extend([pbp_df])
if shifts_df is not None:
shifts_dfs.extend([shifts_df])
# Check if any games...if not let's get out of here
if len(pbp_dfs) == 0:
return None, None
else:
pbp_df = pd.concat(pbp_dfs)
pbp_df = pbp_df.reset_index(drop=True)
pbp_df.apply(lambda row: game_scraper.check_goalie(row), axis=1)
if if_scrape_shifts:
shifts_df = pd.concat(shifts_dfs)
shifts_df = shifts_df.reset_index(drop=True)
else:
shifts_df = None
# Print all errors associated with scrape call
print_errors()
return pbp_df, shifts_df
def scrape_date_range(from_date, to_date, if_scrape_shifts, data_format='csv', preseason=False, rescrape=False, docs_dir=None):
"""
Scrape games in given date range
:param from_date: date you want to scrape from
:param to_date: date you want to scrape to
:param if_scrape_shifts: Boolean indicating whether to also scrape shifts
:param data_format: format you want data in - csv or pandas (csv is default)
    :param preseason: Boolean indicating whether to include preseason games (default is False).
                      This may or may not work.
    :param rescrape: If you want to rescrape pages already scraped. Only applies if you supply a docs dir. (default is False)
:param docs_dir: Directory that either contains previously scraped docs or one that you want them to be deposited
in after scraping. (default is None)
:return: Dictionary with DataFrames and errors or None
"""
# First check if the inputs are good
check_data_format(data_format)
check_valid_dates(from_date, to_date)
# Check on the docs_dir and re_scrape
shared.add_dir(docs_dir)
shared.if_rescrape(rescrape)
games = json_schedule.scrape_schedule(from_date, to_date, preseason)
pbp_df, shifts_df = scrape_list_of_games(games, if_scrape_shifts)
if data_format.lower() == 'csv':
to_csv(from_date+'--'+to_date, pbp_df, shifts_df)
else:
return {"pbp": pbp_df, "shifts": shifts_df, "errors": errors} if if_scrape_shifts else {"pbp": pbp_df,
"errors": errors}
def scrape_seasons(seasons, if_scrape_shifts, data_format='csv', preseason=False, rescrape=False, docs_dir=None):
"""
Given list of seasons it scrapes all the seasons
:param seasons: list of seasons
:param if_scrape_shifts: Boolean indicating whether to also scrape shifts
:param data_format: format you want data in - csv or pandas (csv is default)
    :param preseason: Boolean indicating whether to include preseason games (default is False).
                      This may or may not work.
:param rescrape: If you want to rescrape pages already scraped. Only applies if you supply a docs dir.
:param docs_dir: Directory that either contains previously scraped docs or one that you want them to be deposited
in after scraping
:return: Dictionary with DataFrames and errors or None
"""
# First check if the inputs are good
check_data_format(data_format)
# Check on the docs_dir and re_scrape
shared.add_dir(docs_dir)
shared.if_rescrape(rescrape)
# Holds all seasons scraped (if not csv)
master_pbps, master_shifts = [], []
for season in seasons:
from_date = '-'.join([str(season), '9', '1'])
to_date = '-'.join([str(season + 1), '7', '1'])
games = json_schedule.scrape_schedule(from_date, to_date, preseason)
pbp_df, shifts_df = scrape_list_of_games(games, if_scrape_shifts)
if data_format.lower() == 'csv':
to_csv(str(season)+str(season+1), pbp_df, shifts_df)
else:
master_pbps.append(pbp_df)
master_shifts.append(shifts_df)
if data_format.lower() == 'pandas':
if if_scrape_shifts:
return {"pbp": | pd.concat(master_pbps) | pandas.concat |
# %%
import airtablecache.airtablecache as AC
import pandas as pd
import os
import sys
testDataLoc = os.path.join(os.path.dirname(sys.modules['airtablecache'].__file__), 'data/testData.csv')
sampleDF = pd.DataFrame([
['Alpha', 10, 'India'],
['Beta', 15, 'Australia']
], columns=['Name', 'Age', 'Country'])
newDF = pd.DataFrame([
['Gamma', 20, 'Canada'],
['Beta', 12, 'Australia'],
['Alpha', 10, 'India']
], columns=['Name', 'Age', 'Country'])
appendDF = pd.DataFrame([
['Alpha', 10, 'India'],
['Beta', 15, 'Australia'],
['Gamma', 20, 'Canada'],
['Beta', 12, 'Australia'],
['Alpha', 10, 'India']
], columns=['Name', 'Age', 'Country'])
appendDFDropDupl = pd.DataFrame([
['Alpha', 10, 'India'],
['Beta', 15, 'Australia'],
['Gamma', 20, 'Canada'],
['Beta', 12, 'Australia'],
], columns=['Name', 'Age', 'Country'])
partitionByNameDF = pd.DataFrame([
['Gamma', 20, 'Canada'],
['Beta', 12, 'Australia'],
['Alpha', 10, 'India'],
], columns=['Name', 'Age', 'Country'])
partitionByNameAgeDF = pd.DataFrame([
['Beta', 15, 'Australia'],
['Gamma', 20, 'Canada'],
['Beta', 12, 'Australia'],
['Alpha', 10, 'India'],
], columns=['Name', 'Age', 'Country'])
class TestClass:
def test_readExistingDataCSV(self):
ac = AC.Cacher('csv', location=testDataLoc)
df = ac.readExistingData()
| pd.testing.assert_frame_equal(df, sampleDF) | pandas.testing.assert_frame_equal |
import random
import timeit
from decimal import Decimal
import h5py
import hdf5plugin
import numpy as np
import pandas as pd
import gym
from gym import logger
from gym import spaces
import matplotlib.pyplot as plt
import os
from decimal import getcontext
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
| pd.set_option('display.float_format', lambda x: '%.10f' % x) | pandas.set_option |
from datetime import datetime
from pandas.compat import range, long, zip
from pandas import compat
import re
import numpy as np
from pandas.core.algorithms import unique
from pandas.tseries.offsets import DateOffset
from pandas.util.decorators import cache_readonly
import pandas.tseries.offsets as offsets
import pandas.core.common as com
import pandas.lib as lib
import pandas.tslib as tslib
class FreqGroup(object):
FR_ANN = 1000
FR_QTR = 2000
FR_MTH = 3000
FR_WK = 4000
FR_BUS = 5000
FR_DAY = 6000
FR_HR = 7000
FR_MIN = 8000
FR_SEC = 9000
FR_MS = 10000
FR_US = 11000
FR_NS = 12000
class Resolution(object):
RESO_US = tslib.US_RESO
RESO_MS = tslib.MS_RESO
RESO_SEC = tslib.S_RESO
RESO_MIN = tslib.T_RESO
RESO_HR = tslib.H_RESO
RESO_DAY = tslib.D_RESO
_reso_str_map = {
RESO_US: 'microsecond',
RESO_MS: 'millisecond',
RESO_SEC: 'second',
RESO_MIN: 'minute',
RESO_HR: 'hour',
RESO_DAY: 'day'}
_str_reso_map = dict([(v, k) for k, v in | compat.iteritems(_reso_str_map) | pandas.compat.iteritems |
# Title: Weather Data Aggregator
# Description: Aggregates data from the weather station on Cockcroft from the OnCall API.
# Author: <NAME>
# Date: 17/12/2020
# Version: 1.0
# Import libraries
import pandas as pd
from pandas import json_normalize
import json
import requests
from datetime import datetime, timedelta
from app import csvDump
import os
# Variable Declarations
URL = "http://172.16.17.32/panasense.oncall.finestra.live/api"
ENDPOINT = "/dailypollarchive"
DEVICE = "0FF00FFA2DBB4A029D2902CD33A43364" # Cockcroft Weather Station GUID
ACTION_IDENT_TEMP = "AD7396F9F28D4DA798F0370934C368A9" # Air Tempertaure in C endpoint GUID
ACTION_IDENT_HUM = "8C5DAA6DB83E4E5C8310A27F6E549527" # Relative Humidity endpoint GUID
ACTION_IDENT_PRE = "E589656878094D03A1554197DC90B5B5" # Pressure endpoint GUID
ACTION_IDENT_RF_MM = "90828B8769E74A5B9F74761335CB1676" # Rainfall in mm endpoint GUID
ACTION_IDENT_WS_MS = "B04BE963E74F467A875C534B90BE05A0" # Windspeed in ms endpoint GUID
ACTION_IDENT_WD_D = "752FC7FCFE584FBF980E2FFCAD991D87" # Wind direction endpoint GUID
ACTION_IDENT_SOL_KWM2 = "4EF9B920C87444939DE8069D37ECA200" # Solar Radiation endpoint GUID
START = "2021-03-01T00:00:00"
END = "2021-09-08T23:00:00"
dropCols = ['RECID','Limit','DeviceGUID','ActionGUID','PollType','RV']
# POST credentials
with open("./config/.onCallAPI.json") as f:
accessToken = json.load(f)
API_KEY = accessToken['TOKEN']
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S" # Date format for parsing datetime returned by OnCall API
sd = | pd.DataFrame() | pandas.DataFrame |
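# Editor's sketch (an addition, not part of the original script): one way a single
# poll-archive request might be composed from the constants above. The query-parameter
# names and the auth header below are hypothetical, not taken from OnCall API docs:
#   resp = requests.get(URL + ENDPOINT,
#                       params={'device': DEVICE, 'action': ACTION_IDENT_TEMP,
#                               'start': START, 'end': END},
#                       headers={'Authorization': API_KEY})
#   temp_frame = json_normalize(resp.json())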
import typer
import spotipy
import pandas as pd
import os
from loguru import logger
from spotify_smart_playlists.helpers import spotify_auth
from toolz import thread_last, mapcat, partition_all
from typing import List
def main(library_file: str, artists_file: str):
logger.info("Initializing Spotify client.")
spotify = spotipy.Spotify(client_credentials_manager=spotify_auth())
logger.info(f"Reading library from {library_file}.")
library_frame = pd.read_csv(library_file)
if not os.path.exists(artists_file):
logger.warning(
f"{artists_file} doesn't exist. " "Obtaining all artists."
)
artists_frame = pd.DataFrame()
artists_with_data = set()
else:
logger.info(f"Reading existing artists from {artists_file}.")
artists_frame = | pd.read_csv(artists_file) | pandas.read_csv |
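    # Editor's sketch (an addition, not part of the original script): artist metadata
    # could later be fetched in batches, since Spotify's several-artists endpoint
    # accepts up to 50 IDs per call. The 'new_artist_ids' name below is hypothetical:
    #   for chunk in partition_all(50, new_artist_ids):
    #       fetched = spotify.artists(list(chunk))['artists']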
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
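# assertEq is called by the apply/offset tests above, but its definition is not
# part of this excerpt; the helper below is an assumed reconstruction that
# applies the offset to the base date and compares against the expected result.
def assertEq(dateOffset, baseDate, expected):
    actual = dateOffset + baseDate
    assert actual == expected, \
        "expected %s + %s == %s, got %s" % (dateOffset, baseDate, expected, actual)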
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
self.assert_(not Week().isAnchored())
self.assert_(not Week(2, weekday=2).isAnchored())
self.assert_(not Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
tests.append((Week(-2, weekday=1), # n=0 -> roll forward. Mon
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(Week(weekday=0), datetime(2008, 1, 1), False),
(Week(weekday=0), datetime(2008, 1, 2), False),
(Week(weekday=0), datetime(2008, 1, 3), False),
(Week(weekday=0), datetime(2008, 1, 4), False),
(Week(weekday=0), datetime(2008, 1, 5), False),
(Week(weekday=0), datetime(2008, 1, 6), False),
(Week(weekday=0), datetime(2008, 1, 7), True),
(Week(weekday=1), datetime(2008, 1, 1), True),
(Week(weekday=1), datetime(2008, 1, 2), False),
(Week(weekday=1), datetime(2008, 1, 3), False),
(Week(weekday=1), datetime(2008, 1, 4), False),
(Week(weekday=1), datetime(2008, 1, 5), False),
(Week(weekday=1), datetime(2008, 1, 6), False),
(Week(weekday=1), datetime(2008, 1, 7), False),
(Week(weekday=2), datetime(2008, 1, 1), False),
(Week(weekday=2), datetime(2008, 1, 2), True),
                 (Week(weekday=2), datetime(2008, 1, 3), False)]
        for offset, date, expected in tests:
            assertOnOffset(offset, date, expected)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : <NAME>
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import pandas as pd
import matplotlib.dates as mdates
from matplotlib import pyplot
import matplotlib.ticker as ticker
import datetime
import calendar
import os
import time
import numpy as np
def get_ndvi_profiles_from_csv(csv_file):
ndvi_profile = pd.read_csv(csv_file)
return ndvi_profile
def get_current_list_of_months(first_year_month, number_of_year_months):
textstrs_tuples = [
("201701", "2017\nJAN"),
("201702", "2017\nFEB"),
("201703", "2017\nMAR"),
("201704", "2017\nAPR"),
("201705", "2017\nMAY"),
("201706", "2017\nJUN"),
("201707", "2017\nJUL"),
("201708", "2017\nAUG"),
("201709", "2017\nSEP"),
("201710", "2017\nOCT"),
("201711", "2017\nNOV"),
("201712", "2017\nDEC"),
("201801", "2018\nJAN"),
("201802", "2018\nFEB"),
("201803", "2018\nMAR"),
("201804", "2018\nAPR"),
("201805", "2018\nMAY"),
("201806", "2018\nJUN"),
("201807", "2018\nJUL"),
("201808", "2018\nAUG"),
("201809", "2018\nSEP"),
("201810", "2018\nOCT"),
("201811", "2018\nNOV"),
("201812", "2018\nDEC"),
("201901", "2019\nJAN"),
("201902", "2019\nFEB"),
("201903", "2019\nMAR"),
("201904", "2019\nAPR"),
("201905", "2019\nMAY"),
("201906", "2019\nJUN"),
("201907", "2019\nJUL"),
("201908", "2019\nAUG"),
("201909", "2019\nSEP"),
("201910", "2019\nOCT"),
("201911", "2019\nNOV"),
("201912", "2019\nDEC"),
("202001", "2020\nJAN"),
("202002", "2020\nFEB"),
("202003", "2020\nMAR"),
("202004", "2020\nAPR"),
("202005", "2020\nMAY"),
("202006", "2020\nJUN"),
("202007", "2020\nJUL"),
("202008", "2020\nAUG"),
("202009", "2020\nSEP"),
("202010", "2020\nOCT"),
("202011", "2020\nNOV"),
("202012", "2020\nDEC"),
("202101", "2021\nJAN"),
("202102", "2021\nFEB"),
("202103", "2021\nMAR"),
("202104", "2021\nAPR"),
("202105", "2021\nMAY"),
("202106", "2021\nJUN"),
("202107", "2021\nJUL"),
("202108", "2021\nAUG"),
("202109", "2021\nSEP"),
("202110", "2021\nOCT"),
("202111", "2021\nNOV"),
("202112", "2021\nDEC"),
]
# find the index of the first occurrence of first_year_month in textstrs_tuples
    # and return the second elements (labels) of the tuples from that point on
i = 0
first_year_month_index = i
for textstrs_tuple in textstrs_tuples:
if first_year_month == textstrs_tuple[0]:
first_year_month_index = i
i+=1
current_textstrs = []
for i in range(first_year_month_index, first_year_month_index + number_of_year_months):
current_textstrs.append(textstrs_tuples[i][1])
return current_textstrs
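# Hedged usage example (not in the original module): labels for a four-month
# window starting at June 2019; the expected value is spelled out for clarity.
assert get_current_list_of_months("201906", 4) == [
    "2019\nJUN", "2019\nJUL", "2019\nAUG", "2019\nSEP"]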
def diff_month(d1, d2):
return (d1.year - d2.year) * 12 + d1.month - d2.month
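# Hedged worked example (not in the original module): diff_month counts
# calendar-month boundaries rather than elapsed days.
assert diff_month(datetime.datetime(2020, 3, 1), datetime.datetime(2019, 12, 31)) == 3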
def display_ndvi_profiles(parcel_id, crop, plot_title, out_tif_folder_base, logfile,
add_error_bars = False):
"""
this function plots the NDVI profile and saves the figures to the outputFolder
"""
y_tick_spacing = 0.1
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
ndvi_folder = out_tif_folder_base + "/ndvi"
ndvi_csv_file = ndvi_folder + "/" + chip_folder + "_ndvi.csv"
output_graph_folder = out_tif_folder_base + "/ndvi_graphs"
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
ndvi_profile = pd.read_csv(ndvi_csv_file)
ndvi_profile['acq_date'] = pd.to_datetime(ndvi_profile.acq_date)
ndvi_profile = ndvi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndvi_mean' to more meaningful name
ndvi_profile = ndvi_profile.rename(columns={'ndvi_mean': 'S2 NDVI'})
ndvi_profile = ndvi_profile.rename(columns={'acq_date': 'date'})
ndvi_profile = ndvi_profile[ndvi_profile['S2 NDVI']!='None']
ndvi_profile['S2 NDVI'] = ndvi_profile['S2 NDVI'].apply(pd.to_numeric)
ndvi_profile['ndvi_std'] = ndvi_profile['ndvi_std'].apply(pd.to_numeric)
# check if there are real NDVI values and stdev values in the dataframe
# (for very small parcels the values in the csv can be None which evaluates as object in
    # the dataframe, instead of dtype float64)
# if not ndvi_profile['S2 NDVI'].dtypes == "float64" or \
# not ndvi_profile['ndvi_std'].dtypes == "float64":
# return
# plot the time series
ax0 = pyplot.gca()
if not ndvi_profile.empty:
if add_error_bars:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', yerr='ndvi_std', color = 'blue', ax=ax0,
capsize=4, ecolor='grey', barsabove = 'True')
else:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', color = 'blue', ax=ax0)
# format the graph a little bit
pyplot.ylabel('NDVI')
parcelNumber = ndvi_profile.iloc[0]['Field_ID']
pyplot.title(plot_title + ", Parcel id: " + str(parcelNumber) + " " + crop)
ax0.set_ylim([0,1])
ax0.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax0.xaxis.grid() # horizontal lines
ax0.yaxis.grid() # vertical lines
fig = pyplot.gcf()
fig.autofmt_xdate() # Rotation
fig_size_x = 13
fig_size_y = 7
fig.set_size_inches(fig_size_x, fig_size_y)
min_month = min(ndvi_profile['date']).date().month
min_year = min(ndvi_profile['date']).date().year
max_month = max(ndvi_profile['date']).date().month
max_year = max(ndvi_profile['date']).date().year
number_of_months = diff_month(max(ndvi_profile['date']).date(), min(ndvi_profile['date']).date()) + 1
ax0.set_xlim([datetime.date(min_year, min_month, 1),
datetime.date(max_year, max_month,
calendar.monthrange(max_year, max_month)[1])])
min_year_month = str(min_year) + ('0' + str(min_month))[-2:]
# start_x = 0.045
step_x = 1/number_of_months
start_x = step_x/2 # positions are in graph coordinate system between 0 and 1
    # so the first year_month label sits at half the width of
# one month
loc_y = 0.915
current_year_month_text = get_current_list_of_months(min_year_month, number_of_months)
for current_year_month_index in range (0, number_of_months):
t = current_year_month_text[current_year_month_index]
loc_x = start_x + (current_year_month_index) * step_x
ax0.text(loc_x, loc_y, t, verticalalignment='bottom', horizontalalignment='center', transform=ax0.transAxes,
color='blue', fontsize=13)
ax0.yaxis.set_major_locator(ticker.MultipleLocator(y_tick_spacing))
# save the figure to a jpg file
fig.savefig(output_graph_folder + '/parcel_id_' + str(parcel_id) + '_NDVI.jpg')
pyplot.close(fig)
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tgraph_utils.display_ndvi_profiles:\t", "{0:.3f}".format(time.time() - start), file=fout)
fout.close()
return ndvi_profile
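# Hedged usage sketch (not part of the original module); every path and
# identifier below is a placeholder, not a value used by the project:
#   profile = display_ndvi_profiles(
#       parcel_id=12345, crop="maize", plot_title="NDVI",
#       out_tif_folder_base="/tmp/cbm_output", logfile="/tmp/graph.log",
#       add_error_bars=True)
#   # expects /tmp/cbm_output/ndvi/12345_maize_ndvi.csv to exist and writes
#   # /tmp/cbm_output/ndvi_graphs/parcel_id_12345_NDVI.jpg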
def display_ndwi_profiles(parcel_id, crop, plot_title, out_tif_folder_base, logfile,
add_error_bars = False):
"""
this function plots the NDWI profile and saves the figures to the outputFolder
"""
y_tick_spacing = 0.1
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
ndwi_folder = out_tif_folder_base + "/ndwi"
ndwi_csv_file = ndwi_folder + "/" + chip_folder + "_ndwi.csv"
output_graph_folder = out_tif_folder_base + "/ndwi_graphs"
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
ndwi_profile = pd.read_csv(ndwi_csv_file)
ndwi_profile['acq_date'] = pd.to_datetime(ndwi_profile.acq_date)
ndwi_profile = ndwi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndwi_mean' to more meaningful name
ndwi_profile = ndwi_profile.rename(columns={'ndwi_mean': 'S2 NDWI'})
ndwi_profile = ndwi_profile.rename(columns={'acq_date': 'date'})
# check if there are real ndwi values and stdev values in the dataframe
# (for very small parcels the values in the csv can be None which evaluates as object in
    # the dataframe, instead of dtype float64)
if not ndwi_profile['S2 NDWI'].dtypes == "float64" or \
not ndwi_profile['ndwi_std'].dtypes == "float64":
return
# plot the time series
ax0 = pyplot.gca()
if not ndwi_profile.empty:
if add_error_bars:
ndwi_profile.plot(kind='line', marker='+', x='date',y='S2 NDWI', yerr='ndwi_std', color = 'blue', ax=ax0,
capsize=4, ecolor='grey', barsabove = 'True')
else:
ndwi_profile.plot(kind='line', marker='+', x='date',y='S2 NDWI', color = 'blue', ax=ax0)
# format the graph a little bit
pyplot.ylabel('NDWI')
parcelNumber = ndwi_profile.iloc[0]['Field_ID']
pyplot.title(plot_title + ", Parcel id: " + str(parcelNumber) + " " + crop)
ax0.set_ylim([-1,1])
ax0.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax0.xaxis.grid() # horizontal lines
ax0.yaxis.grid() # vertical lines
fig = pyplot.gcf()
fig.autofmt_xdate() # Rotation
fig_size_x = 13
fig_size_y = 7
fig.set_size_inches(fig_size_x, fig_size_y)
min_month = min(ndwi_profile['date']).date().month
min_year = min(ndwi_profile['date']).date().year
max_month = max(ndwi_profile['date']).date().month
max_year = max(ndwi_profile['date']).date().year
number_of_months = diff_month(max(ndwi_profile['date']).date(), min(ndwi_profile['date']).date()) + 1
ax0.set_xlim([datetime.date(min_year, min_month, 1),
datetime.date(max_year, max_month,
calendar.monthrange(max_year, max_month)[1])])
min_year_month = str(min_year) + ('0' + str(min_month))[-2:]
# start_x = 0.045
step_x = 1/number_of_months
start_x = step_x/2 # positions are in graph coordinate system between 0 and 1
    # so the first year_month label sits at half the width of
# one month
loc_y = 0.915
current_year_month_text = get_current_list_of_months(min_year_month, number_of_months)
for current_year_month_index in range (0, number_of_months):
t = current_year_month_text[current_year_month_index]
loc_x = start_x + (current_year_month_index) * step_x
ax0.text(loc_x, loc_y, t, verticalalignment='bottom', horizontalalignment='center', transform=ax0.transAxes,
color='blue', fontsize=13)
ax0.yaxis.set_major_locator(ticker.MultipleLocator(y_tick_spacing))
# save the figure to a jpg file
fig.savefig(output_graph_folder + '/parcel_id_' + str(parcel_id) + '_ndwi.jpg')
pyplot.close(fig)
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tgraph_utils.display_ndwi_profiles:\t", "{0:.3f}".format(time.time() - start), file=fout)
fout.close()
return ndwi_profile
def display_ndvi_profiles_with_mean_profile_of_the_crop(parcel_id, crop, plot_title, out_tif_folder_base, logfile,
add_error_bars = False):
"""
this function plots the NDVI profile and saves the figures to the outputFolder
"""
mean_profile_folder = "c:/Users/Csaba/ownCloud/GTCAP/cbm_qa/be_fl/notebooks/output_csv_selected_v02"
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
ndvi_folder = out_tif_folder_base + "/ndvi"
ndvi_csv_file = ndvi_folder + "/" + chip_folder + "_ndvi.csv"
mean_ndvi_csv_file = mean_profile_folder + "/" + crop #+ ".csv"
output_graph_folder = out_tif_folder_base + "/ndvi_graphs_with_mean"
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
ndvi_profile = pd.read_csv(ndvi_csv_file)
ndvi_profile['acq_date'] = pd.to_datetime(ndvi_profile.acq_date)
ndvi_profile = ndvi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndvi_mean' to more meaningful name
ndvi_profile = ndvi_profile.rename(columns={'ndvi_mean': 'S2 NDVI'})
ndvi_profile = ndvi_profile.rename(columns={'acq_date': 'date'})
mean_ndvi_csv_file_exists = False
if os.path.isfile(mean_ndvi_csv_file):
mean_ndvi_csv_file_exists = True
mean_ndvi_profile = pd.read_csv(mean_ndvi_csv_file)
mean_ndvi_profile['acq_date'] = pd.to_datetime(mean_ndvi_profile.acq_date)
mean_ndvi_profile = mean_ndvi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndvi_mean' to more meaningful name
mean_ndvi_profile = mean_ndvi_profile.rename(columns={'ndvi_mean': 'S2 NDVI mean'})
mean_ndvi_profile = mean_ndvi_profile.rename(columns={'acq_date': 'date'})
# check if there are real NDVI values and stdev values in the dataframe
# (for very small parcels the values in the csv can be None which evaluates as object in
    # the dataframe, instead of dtype float64)
if not ndvi_profile['S2 NDVI'].dtypes == "float64" or \
not ndvi_profile['ndvi_std'].dtypes == "float64":
return
# plot the time series
ax0 = pyplot.gca()
if not ndvi_profile.empty:
if add_error_bars:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', yerr='ndvi_std', color = 'blue', ax=ax0,
capsize=4, ecolor='grey', barsabove = 'True')
else:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', color = 'blue', ax=ax0)
if mean_ndvi_csv_file_exists:
mean_ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI mean', color = 'red', ax=ax0)
# format the graph a little bit
pyplot.ylabel('NDVI')
parcelNumber = ndvi_profile.iloc[0]['Field_ID']
pyplot.title(plot_title + ", Parcel id: " + str(parcelNumber) + " " + crop)
ax0.set_ylim([0,1])
ax0.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax0.xaxis.grid() # horizontal lines
ax0.yaxis.grid() # vertical lines
fig = pyplot.gcf()
fig.autofmt_xdate() # Rotation
fig_size_x = 13
fig_size_y = 7
fig.set_size_inches(fig_size_x, fig_size_y)
min_month = min(ndvi_profile['date']).date().month
min_year = min(ndvi_profile['date']).date().year
max_month = max(ndvi_profile['date']).date().month
max_year = max(ndvi_profile['date']).date().year
number_of_months = diff_month(max(ndvi_profile['date']).date(), min(ndvi_profile['date']).date()) + 1
ax0.set_xlim([datetime.date(min_year, min_month, 1),
datetime.date(max_year, max_month,
calendar.monthrange(max_year, max_month)[1])])
min_year_month = str(min_year) + ('0' + str(min_month))[-2:]
step_x = 1/number_of_months
start_x = step_x/2 # positions are in graph coordinate system between 0 and 1
    # so the first year_month label sits at half the width of
# one month
loc_y = 0.915
current_year_month_text = get_current_list_of_months(min_year_month, number_of_months)
for current_year_month_index in range (0, number_of_months):
t = current_year_month_text[current_year_month_index]
loc_x = start_x + (current_year_month_index) * step_x
ax0.text(loc_x, loc_y, t, verticalalignment='bottom', horizontalalignment='center', transform=ax0.transAxes,
color='blue', fontsize=13)
# save the figure to a jpg file
fig.savefig(output_graph_folder + '/parcel_id_' + str(parcel_id) + '_NDVI.jpg')
pyplot.close(fig)
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tgraph_utils.display_ndvi_profiles_with_mean_profile_of_the_crop:\t", "{0:.3f}".format(time.time() - start), file=fout)
fout.close()
return ndvi_profile
def display_ndvi_profiles_with_mean_profile_of_the_crop_with_std(parcel_id, crop, plot_title, out_tif_folder_base,
logfile, mean_profile_folder,
add_error_bars = False,
mean_color = 'green', current_color = 'magenta'):
"""
this function plots the NDVI profile and saves the figures to the outputFolder
"""
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
ndvi_folder = out_tif_folder_base + "/ndvi"
ndvi_csv_file = ndvi_folder + "/" + chip_folder + "_ndvi.csv"
mean_ndvi_csv_file = mean_profile_folder + "/" + crop + ".csv"
output_graph_folder = out_tif_folder_base + "/ndvi_graphs_with_mean"
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
ndvi_profile = pd.read_csv(ndvi_csv_file)
ndvi_profile['acq_date'] = pd.to_datetime(ndvi_profile.acq_date)
ndvi_profile = ndvi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndvi_mean' to more meaningful name
ndvi_profile = ndvi_profile.rename(columns={'ndvi_mean': 'S2 NDVI'})
ndvi_profile = ndvi_profile.rename(columns={'acq_date': 'date'})
mean_ndvi_csv_file_exists = False
if os.path.isfile(mean_ndvi_csv_file):
mean_ndvi_csv_file_exists = True
mean_ndvi_profile = pd.read_csv(mean_ndvi_csv_file)
mean_ndvi_profile['acq_date'] = pd.to_datetime(mean_ndvi_profile.acq_date)
mean_ndvi_profile = mean_ndvi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndvi_mean' to more meaningful name
mean_ndvi_profile = mean_ndvi_profile.rename(columns={'ndvi_mean': 'S2 NDVI mean'})
mean_ndvi_profile = mean_ndvi_profile.rename(columns={'acq_date': 'date'})
# check if there are real NDVI values and stdev values in the dataframe
# (for very small parcels the values in the csv can be None which evaluates as object in
    # the dataframe, instead of dtype float64)
if not ndvi_profile['S2 NDVI'].dtypes == "float64" or \
not ndvi_profile['ndvi_std'].dtypes == "float64":
return
# plot the time series
ax0 = pyplot.gca()
if not ndvi_profile.empty:
if add_error_bars:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', yerr='ndvi_std', color = current_color, ax=ax0,
capsize=4, ecolor='magenta', barsabove = 'True')
else:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', color = current_color, ax=ax0)
if mean_ndvi_csv_file_exists:
mean_ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI mean', color = mean_color, ax=ax0)
pyplot.fill_between(mean_ndvi_profile['date'],
mean_ndvi_profile['S2 NDVI mean']-mean_ndvi_profile['ndvi_stdev'],
mean_ndvi_profile['S2 NDVI mean']+mean_ndvi_profile['ndvi_stdev'],
alpha=0.2, color = mean_color)
# format the graph a little bit
pyplot.ylabel('NDVI')
parcelNumber = ndvi_profile.iloc[0]['Field_ID']
pyplot.title(plot_title + ", Parcel id: " + str(parcelNumber) + " " + crop)
ax0.set_ylim([0,1])
ax0.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax0.xaxis.grid() # horizontal lines
ax0.yaxis.grid() # vertical lines
fig = pyplot.gcf()
fig.autofmt_xdate() # Rotation
fig_size_x = 13
fig_size_y = 7
fig.set_size_inches(fig_size_x, fig_size_y)
min_month = min(ndvi_profile['date']).date().month
min_year = min(ndvi_profile['date']).date().year
max_month = max(ndvi_profile['date']).date().month
max_year = max(ndvi_profile['date']).date().year
number_of_months = diff_month(max(ndvi_profile['date']).date(), min(ndvi_profile['date']).date()) + 1
ax0.set_xlim([datetime.date(min_year, min_month, 1),
datetime.date(max_year, max_month,
calendar.monthrange(max_year, max_month)[1])])
min_year_month = str(min_year) + ('0' + str(min_month))[-2:]
step_x = 1/number_of_months
start_x = step_x/2 # positions are in graph coordinate system between 0 and 1
    # so the first year_month label sits at half the width of
# one month
loc_y = 0.915
current_year_month_text = get_current_list_of_months(min_year_month, number_of_months)
for current_year_month_index in range (0, number_of_months):
t = current_year_month_text[current_year_month_index]
loc_x = start_x + (current_year_month_index) * step_x
ax0.text(loc_x, loc_y, t, verticalalignment='bottom', horizontalalignment='center', transform=ax0.transAxes,
color='blue', fontsize=13)
# save the figure to a jpg file
fig.savefig(output_graph_folder + '/parcel_id_' + str(parcel_id) + '_NDVI.jpg')
pyplot.close(fig)
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tgraph_utils.display_ndvi_profiles_with_mean_profile_of_the_crop_with_std:\t", "{0:.3f}".format(time.time() - start), file=fout)
fout.close()
return ndvi_profile
def display_s1_bs_profiles(parcel_id, crop, plot_title, out_tif_folder_base, logfile,
add_error_bars, polarisation, orbit_orientation):
"""
this function plots the backscatter profile and saves the figures to the outputFolder
"""
y_tick_spacing = 0.1
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
s1_bs_folder = out_tif_folder_base + "/s1_bs"
s1_bs_csv_file = s1_bs_folder + "/" + chip_folder + "_s1bs_" + polarisation + "_" + orbit_orientation + ".csv"
output_graph_folder = out_tif_folder_base + "/s1_bs_graphs_" + polarisation + "_" + orbit_orientation
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
s1_bs_profile = pd.read_csv(s1_bs_csv_file)
s1_bs_profile['acq_date'] = pd.to_datetime(s1_bs_profile.acq_date)
s1_bs_profile = s1_bs_profile.sort_values(by=['acq_date'])
# rename the column names from 'bs_mean' to more meaningful name
s1_bs_profile = s1_bs_profile.rename(columns={'bs_mean': 'S1 BS'})
s1_bs_profile = s1_bs_profile.rename(columns={'acq_date': 'date'})
# check if there are real backscatter values and stdev values in the dataframe
# (for very small parcels the values in the csv can be None which evaluates as object in
    # the dataframe, instead of dtype float64)
if not s1_bs_profile['S1 BS'].dtypes == "float64" or \
not s1_bs_profile['bs_std'].dtypes == "float64":
return
# plot the time series
ax0 = pyplot.gca()
if not s1_bs_profile.empty:
if add_error_bars:
s1_bs_profile.plot(kind='line', marker='+', x='date',y='S1 BS', yerr='bs_std', color = 'blue', ax=ax0,
capsize=4, ecolor='grey', barsabove = 'True')
else:
s1_bs_profile.plot(kind='line', marker='+', x='date',y='S1 BS', color = 'blue', ax=ax0)
# format the graph a little bit
pyplot.ylabel('Backscatter')
# pyplot.ylabel(r'Backscattering coefficient, $\gamma\degree$ (dB)')
parcelNumber = s1_bs_profile.iloc[0]['Field_ID']
pyplot.title(plot_title + ", Parcel id: " + str(parcelNumber) + " " + crop)
ax0.set_ylim([0,1])
ax0.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax0.xaxis.grid() # horizontal lines
ax0.yaxis.grid() # vertical lines
fig = pyplot.gcf()
fig.autofmt_xdate() # Rotation
fig_size_x = 13
fig_size_y = 7
fig.set_size_inches(fig_size_x, fig_size_y)
min_month = min(s1_bs_profile['date']).date().month
min_year = min(s1_bs_profile['date']).date().year
max_month = max(s1_bs_profile['date']).date().month
max_year = max(s1_bs_profile['date']).date().year
number_of_months = diff_month(max(s1_bs_profile['date']).date(), min(s1_bs_profile['date']).date()) + 1
ax0.set_xlim([datetime.date(min_year, min_month, 1),
datetime.date(max_year, max_month,
calendar.monthrange(max_year, max_month)[1])])
min_year_month = str(min_year) + ('0' + str(min_month))[-2:]
step_x = 1/number_of_months
start_x = step_x/2 # positions are in graph coordinate system between 0 and 1
    # so the first year_month label sits at half the width of
# one month
loc_y = 0.915
current_year_month_text = get_current_list_of_months(min_year_month, number_of_months)
for current_year_month_index in range (0, number_of_months):
t = current_year_month_text[current_year_month_index]
loc_x = start_x + (current_year_month_index) * step_x
ax0.text(loc_x, loc_y, t, verticalalignment='bottom', horizontalalignment='center', transform=ax0.transAxes,
color='blue', fontsize=13)
ax0.yaxis.set_major_locator(ticker.MultipleLocator(y_tick_spacing))
# save the figure to a jpg file
    fig.savefig(output_graph_folder + '/parcel_id_' + str(parcel_id) + '_BS.jpg')
pyplot.close(fig)
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tgraph_utils.display_s1_bs_profiles:\t", "{0:.3f}".format(time.time() - start), file=fout)
fout.close()
return s1_bs_profile
def display_s1_bs_profiles_together(parcel_id, crop, plot_title, out_tif_folder_base, logfile,
add_error_bars):
"""
this function plots the Sentinel-1 backscatter profile and saves the figures to the output_graph_folder
"""
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
s1_bs_folder = out_tif_folder_base + "/s1_bs"
output_graph_folder = out_tif_folder_base + "/s1_bs_graphs_together"
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
polarisations = ["VV", "VH"]
orbit_orientations = ["D", "A"]
for polarisation in polarisations:
for orbit_orientation in orbit_orientations:
s1_bs_csv_file = s1_bs_folder + "/" + chip_folder + "_s1bs_" + polarisation + "_" + orbit_orientation + ".csv"
s1_bs_profile = pd.read_csv(s1_bs_csv_file)
s1_bs_profile['acq_date'] = pd.to_datetime(s1_bs_profile.acq_date)
s1_bs_profile = s1_bs_profile.sort_values(by=['acq_date'])
profile_name = 'S1 BS ' + polarisation + " " + orbit_orientation
s1_bs_profile = s1_bs_profile.rename(columns={'bs_mean': profile_name})
s1_bs_profile = s1_bs_profile.rename(columns={'acq_date': 'date'})
s1_bs_profile[profile_name] = s1_bs_profile[profile_name].map(lambda s: 10.0*np.log10(s))
# check if there are real backscatter and stdev values in the dataframe
# (for very small parcels the values in the csv can be None which evaluates as object in
            # the dataframe, instead of dtype float64)
if not s1_bs_profile[profile_name].dtypes == "float64" or \
not s1_bs_profile['bs_std'].dtypes == "float64":
return
# plot the time series
ax0 = pyplot.gca()
if not s1_bs_profile.empty:
if add_error_bars:
s1_bs_profile.plot(kind='line', marker='+', x='date',y=profile_name, yerr='bs_std', ax=ax0,
capsize=4, ecolor='grey', barsabove = 'True')
else:
s1_bs_profile.plot(kind='line', marker='+', x='date',y=profile_name, ax=ax0)
# format the graph a little bit
pyplot.ylabel(r'Backscattering coefficient, $\gamma\degree$ (dB)')
parcelNumber = s1_bs_profile.iloc[0]['Field_ID']
pyplot.title(plot_title + ", Parcel id: " + str(parcelNumber) + " " + crop)
ax0.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax0.xaxis.grid() # horizontal lines
ax0.yaxis.grid() # vertical lines
fig = pyplot.gcf()
fig_size_x = 13
fig_size_y = 7
fig.set_size_inches(fig_size_x, fig_size_y)
min_month = min(s1_bs_profile['date']).date().month
min_year = min(s1_bs_profile['date']).date().year
max_month = max(s1_bs_profile['date']).date().month
max_year = max(s1_bs_profile['date']).date().year
number_of_months = diff_month(max(s1_bs_profile['date']).date(), min(s1_bs_profile['date']).date()) + 1
ax0.set_xlim([datetime.date(min_year, min_month, 1),
datetime.date(max_year, max_month,
calendar.monthrange(max_year, max_month)[1])])
min_year_month = str(min_year) + ('0' + str(min_month))[-2:]
step_x = 1/number_of_months
start_x = step_x/2 # positions are in graph coordinate system between 0 and 1
# so first year_month label is at half the size of the width of
# one month
loc_y = 0.915
current_year_month_text = get_current_list_of_months(min_year_month, number_of_months)
for current_year_month_index in range (0, number_of_months):
t = current_year_month_text[current_year_month_index]
loc_x = start_x + (current_year_month_index) * step_x
ax0.text(loc_x, loc_y, t, verticalalignment='bottom', horizontalalignment='center', transform=ax0.transAxes,
color='blue', fontsize=13)
# save the figure to a jpg file
fig.savefig(output_graph_folder + '/parcel_id_' + str(parcel_id) + '_BS.jpg')
pyplot.close(fig)
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tgraph_utils.display_s1_bs_profiles_together:\t", "{0:.3f}".format(time.time() - start), file=fout)
fout.close()
return s1_bs_profile
def display_s1_coh6_profiles_together(parcel_id, crop, plot_title, out_tif_folder_base, logfile,
add_error_bars):
"""
    this function plots the Sentinel-1 6-day coherence profiles and saves the figures to the output_graph_folder
"""
there_is_on_valid_profile = False
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
s1_coh6_folder = out_tif_folder_base + "/s1_coh6"
output_graph_folder = out_tif_folder_base + "/s1_coh6_graphs_together"
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
polarisations = ["VV", "VH"]
orbit_orientations = ["D", "A"]
for polarisation in polarisations:
for orbit_orientation in orbit_orientations:
s1_coh6_csv_file = s1_coh6_folder + "/" + chip_folder + "_s1coh6_" + polarisation + "_" + orbit_orientation + ".csv"
s1_coh6_profile = pd.read_csv(s1_coh6_csv_file)
s1_coh6_profile['acq_date'] = pd.to_datetime(s1_coh6_profile.acq_date)
s1_coh6_profile = s1_coh6_profile.sort_values(by=['acq_date'])
profile_name = 'S1 COH6 ' + polarisation + " " + orbit_orientation
s1_coh6_profile = s1_coh6_profile.rename(columns={'coh6_mean': profile_name})
s1_coh6_profile = s1_coh6_profile.rename(columns={'acq_date': 'date'})
# s1_coh6_profile[profile_name] = s1_coh6_profile[profile_name].map(lambda s: 10.0*np.log10(s))
# check if there are real backscatter and stdev values in the dataframe
# (for very small parcels the values in the csv can be None which evaluates as object in
            # the dataframe, instead of dtype float64)
# if not s1_coh6_profile[profile_name].dtypes == "float64" or \
# not s1_coh6_profile['coh6_std'].dtypes == "float64":
# return
# plot the time series
ax0 = pyplot.gca()
if not s1_coh6_profile.empty:
                # keep this profile for later, to derive min_month and the other axis parameters for the graph
s1_coh6_for_min_month = s1_coh6_profile
there_is_on_valid_profile = True
if add_error_bars:
s1_coh6_profile.plot(kind='line', marker='+', x='date',y=profile_name, yerr='coh6_std', ax=ax0,
capsize=4, ecolor='grey', barsabove = 'True')
else:
s1_coh6_profile.plot(kind='line', marker='+', x='date',y=profile_name, ax=ax0)
# format the graph a little bit
if not there_is_on_valid_profile:
return
pyplot.ylabel(r'Coherence')
# parcelNumber = s1_coh6_profile.iloc[0]['Field_ID']
pyplot.title(plot_title + ", Parcel id: " + str(parcel_id) + " " + crop)
ax0.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax0.xaxis.grid() # horizontal lines
ax0.yaxis.grid() # vertical lines
fig = pyplot.gcf()
fig_size_x = 13
fig_size_y = 7
fig.set_size_inches(fig_size_x, fig_size_y)
min_month = min(s1_coh6_for_min_month['date']).date().month
min_year = min(s1_coh6_for_min_month['date']).date().year
max_month = max(s1_coh6_for_min_month['date']).date().month
max_year = max(s1_coh6_for_min_month['date']).date().year
number_of_months = diff_month(max(s1_coh6_for_min_month['date']).date(), min(s1_coh6_for_min_month['date']).date()) + 1
ax0.set_xlim([datetime.date(min_year, min_month, 1),
datetime.date(max_year, max_month,
calendar.monthrange(max_year, max_month)[1])])
min_year_month = str(min_year) + ('0' + str(min_month))[-2:]
step_x = 1/number_of_months
start_x = step_x/2 # positions are in graph coordinate system between 0 and 1
# so first year_month label is at half the size of the width of
# one month
loc_y = 0.915
current_year_month_text = get_current_list_of_months(min_year_month, number_of_months)
for current_year_month_index in range (0, number_of_months):
t = current_year_month_text[current_year_month_index]
loc_x = start_x + (current_year_month_index) * step_x
ax0.text(loc_x, loc_y, t, verticalalignment='bottom', horizontalalignment='center', transform=ax0.transAxes,
color='blue', fontsize=13)
# save the figure to a jpg file
fig.savefig(output_graph_folder + '/parcel_id_' + str(parcel_id) + '_COH6.jpg')
pyplot.close(fig)
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tgraph_utils.display_s1_coh6_profiles_together:\t", "{0:.3f}".format(time.time() - start), file=fout)
fout.close()
return s1_coh6_profile
def display_ndvi_profiles_with_fixed_date_range(parcel_id, crop, plot_title, out_tif_folder_base,
logfile, x_start_date, x_end_date,
parcel_area_ha,
add_error_bars = False):
"""
this function plots the NDVI profile and saves the figures to the outputFolder
"""
y_tick_spacing = 0.1
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
ndvi_folder = out_tif_folder_base + "/ndvi"
ndvi_csv_file = ndvi_folder + "/" + chip_folder + "_ndvi.csv"
output_graph_folder = out_tif_folder_base + "/ndvi_graphs_fixed_date_range"
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
ndvi_profile = pd.read_csv(ndvi_csv_file)
ndvi_profile['acq_date'] = pd.to_datetime(ndvi_profile.acq_date)
ndvi_profile = ndvi_profile.sort_values(by=['acq_date'])
# rename the column names from 'ndvi_mean' to more meaningful name
ndvi_profile = ndvi_profile.rename(columns={'ndvi_mean': 'S2 NDVI'})
ndvi_profile = ndvi_profile.rename(columns={'acq_date': 'date'})
ndvi_profile = ndvi_profile[ndvi_profile['S2 NDVI']!='None']
ndvi_profile['S2 NDVI'] = ndvi_profile['S2 NDVI'].apply(pd.to_numeric)
ndvi_profile['ndvi_std'] = ndvi_profile['ndvi_std'].apply(pd.to_numeric)
# check if there are real NDVI values and stdev values in the dataframe
# (for very small parcels the values in the csv can be None which evaluates as object in
    # the dataframe, instead of dtype float64)
# if not ndvi_profile['S2 NDVI'].dtypes == "float64" or \
# not ndvi_profile['ndvi_std'].dtypes == "float64":
# return
# plot the time series
ax0 = pyplot.gca()
if not ndvi_profile.empty:
if add_error_bars:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', yerr='ndvi_std', color = 'blue', ax=ax0,
capsize=4, ecolor='grey', barsabove = 'True')
else:
ndvi_profile.plot(kind='line', marker='+', x='date',y='S2 NDVI', color = 'blue', ax=ax0)
# format the graph a little bit
pyplot.ylabel('NDVI')
parcelNumber = ndvi_profile.iloc[0]['Field_ID']
pyplot.title(plot_title + ", Parcel id: " + str(parcelNumber) + " (" + crop + ", " + parcel_area_ha + " ha)")
ax0.set_ylim([0,1])
ax0.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax0.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax0.xaxis.grid() # horizontal lines
ax0.yaxis.grid() # vertical lines
fig = pyplot.gcf()
fig.autofmt_xdate() # Rotation
fig_size_x = 13
fig_size_y = 7
fig.set_size_inches(fig_size_x, fig_size_y)
x_start_date_date = datetime.datetime.strptime(x_start_date, '%Y-%m-%d').date()
x_end_date_date = datetime.datetime.strptime(x_end_date, '%Y-%m-%d').date()
min_month = x_start_date_date.month
min_year = x_start_date_date.year
max_month = x_end_date_date.month
max_year = x_end_date_date.year
number_of_months = diff_month(x_end_date_date, x_start_date_date) + 1
ax0.set_xlim([datetime.date(min_year, min_month, 1),
datetime.date(max_year, max_month,
calendar.monthrange(max_year, max_month)[1])])
min_year_month = str(min_year) + ('0' + str(min_month))[-2:]
# start_x = 0.045
step_x = 1/number_of_months
start_x = step_x/2 # positions are in graph coordinate system between 0 and 1
    # so the first year_month label sits at half the width of
# one month
loc_y = 0.915
current_year_month_text = get_current_list_of_months(min_year_month, number_of_months)
for current_year_month_index in range (0, number_of_months):
t = current_year_month_text[current_year_month_index]
loc_x = start_x + (current_year_month_index) * step_x
ax0.text(loc_x, loc_y, t, verticalalignment='bottom', horizontalalignment='center', transform=ax0.transAxes,
color='blue', fontsize=13)
ax0.yaxis.set_major_locator(ticker.MultipleLocator(y_tick_spacing))
# save the figure to a jpg file
fig.savefig(output_graph_folder + '/parcel_id_' + str(parcel_id) + '_NDVI.jpg')
pyplot.close(fig)
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tgraph_utils.display_ndvi_profiles_with_fixed_date_range:\t", "{0:.3f}".format(time.time() - start), file=fout)
fout.close()
return ndvi_profile
def display_index_profiles_with_fixed_date_range(parcel_id, crop, plot_title, out_tif_folder_base, logfile,
x_start_date, x_end_date,
index_name,
add_error_bars = False):
"""
this function plots the Index profile and saves the figures to the outputFolder
"""
y_tick_spacing = 0.1
fout = open(logfile, 'a')
start = time.time()
chip_folder = str(parcel_id) + '_' + crop
index_folder = out_tif_folder_base + "/" + index_name
index_csv_file = index_folder + "/" + chip_folder + "_" + index_name + ".csv"
output_graph_folder = out_tif_folder_base + "/" + index_name + "_graphs"
if not os.path.exists(output_graph_folder):
os.makedirs(output_graph_folder)
    index_profile = pd.read_csv(index_csv_file)
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
from dramkit.gentools import isnull
from dramkit.iotools import load_csv
from dramkit.datetimetools import today_date
from dramkit.datetimetools import get_date_format
from dramkit.datetimetools import date_reformat
from dramkit.datetimetools import get_recent_workday_chncal
from dramkit.datetimetools import get_recent_inweekday_chncal
from dramkit.fintools.utils_chn import get_recent_trade_date_chncal
#%%
def find_target_dir(dir_name):
'''
    Search the candidate disk partitions for the folder dir_name and return its full path
'''
prefix_dirs = [
'D:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/',
'E:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/',
'F:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/',
'G:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/',
'C:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/',
'/media/glhyy/DATA/Genlovy_Hoo/HooProjects/HooFin/data/Archive/'
]
for dr in prefix_dirs:
if os.path.exists(dr+dir_name):
return dr + dir_name
    raise ValueError('Could not find a path for folder `{}`; please check!'.format(dir_name))
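# Hedged usage sketch (not in the original module); the folder name below is
# an example of the archive sub-folders referenced elsewhere in this file:
#   daily_qfq_dir = find_target_dir('stocks_daily/qfq/')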
#%%
def get_15s_dir():
    '''Locate the directory where the 15-second K-line data is stored'''
dirs = ['D:/Genlovy_Hoo/ProjectsSL/data/quotes/cf/',
'E:/Genlovy_Hoo/HooProjects/ProjectsSL/data/quotes/cf/',
'F:/Genlovy_Hoo/HooProjects/ProjectsSL/data/quotes/cf/',
'G:/Genlovy_Hoo/HooProjects/ProjectsSL/data/quotes/cf/',
'C:/Genlovy_Hoo/HooProjects/ProjectsSL/data/quotes/cf/',
'/media/glhyy/DATA/Genlovy_Hoo/HooProjects/ProjectsSL/data/quotes/cf/']
for dr in dirs:
if os.path.exists(dr):
return dr
    raise ValueError('Data folder not found; please set the 15-second K-line data path!')
def get_1min_dir1():
    '''Locate the directory where the 1-minute K-line data is stored'''
dirs = ['D:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/fund_minute/',
'E:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/fund_minute/',
'F:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/fund_minute/',
'G:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/fund_minute/',
'C:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/fund_minute/',
'/media/glhyy/DATA/Genlovy_Hoo/HooProjects/HooFin/data/Archive/fund_minute/']
for dr in dirs:
if os.path.exists(dr):
return dr
    raise ValueError('Data folder not found; please set the 1-minute K-line data path!')
def get_1min_dir2():
    '''Locate the directory where the 1-minute K-line data is stored'''
dirs = ['D:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/stocks_minute/qfq/',
'E:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/stocks_minute/qfq/',
'F:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/stocks_minute/qfq/',
'G:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/stocks_minute/qfq/',
'C:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/stocks_minute/qfq/',
'/media/glhyy/DATA/Genlovy_Hoo/HooProjects/HooFin/data/Archive/stocks_minute/qfq/']
for dr in dirs:
if os.path.exists(dr):
return dr
    raise ValueError('Data folder not found; please set the 1-minute K-line data path!')
def get_daily_dir1():
    '''Locate the directory where the daily K-line data is stored'''
dirs = ['D:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/stocks_daily/qfq/',
'E:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/stocks_daily/qfq/',
'F:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/stocks_daily/qfq/',
'G:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/stocks_daily/qfq/',
'C:/Genlovy_Hoo/HooProjects/HooFin/data/Archive/stocks_daily/qfq/',
'/media/glhyy/DATA/Genlovy_Hoo/HooProjects/HooFin/data/Archive/stocks_daily/qfq/']
for dr in dirs:
if os.path.exists(dr):
return dr
    raise ValueError('Data folder not found; please set the daily K-line data path!')
#%%
def load_15s_data(code, start_date=None, end_date=None):
    '''Load the 15-second data'''
data_dir = get_15s_dir()
files = [x for x in os.listdir(data_dir) if code in x and '15s' in x]
if start_date:
files = [x for x in files if x[7:15] >= start_date]
if end_date:
files = [x for x in files if x[7:15] <= end_date]
data = []
for file in files:
fpath = data_dir + file
df = load_csv(fpath)
data.append(df)
data = pd.concat(data, axis=0)
# data['date'] = data['date'].apply(lambda x: date_reformat(str(x)))
data['date'] = data['date'].astype(str)
data['time'] = data['date'] + ' ' + data['time']
return data
#%%
def load_minute_data(code, start_date=None, end_date=None):
    '''Load the minute-level data'''
if code in ['510050', '510030', '159919']:
fpath = get_1min_dir1() + code + '.csv'
else:
fpath = get_1min_dir2() + code + '.csv'
data = load_csv(fpath)
data['date'] = data['time'].apply(lambda x: x[:10])
data['minute'] = data['time'].apply(lambda x: x[11:])
if not isnull(start_date):
data = data[data['date'] >= start_date]
if not isnull(end_date):
data = data[data['date'] <= end_date]
return data
def handle_minute_930(df_minute):
    '''Handle the 09:30 opening quotes'''
def get_start(df):
date = df['date'].iloc[0]
        start = pd.DataFrame(df.iloc[0, :])
# from IPython.core.display import display, HTML
# display(HTML("<style>.container { width:100% !important; }</style>"))
_ = None
import argparse
import json as J
import os
import shutil
import tempfile
import joblib
import mlflow
import functools as F
from importlib import reload as rl
import copy
import pandas as pd
import numpy as np
import scipy.stats
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path
from collections import Counter as C
from sklearn.metrics import accuracy_score
from pylab import ma, cm
from sklearn.utils import Bunch
from sklearn.preprocessing import LabelEncoder
import lightgbm
from tqdm import tqdm
from pymfe.mfe import MFE
import src.models as M
import src.mstream as MS
import src.aux as A
np.random.seed(42)
# https://stackoverflow.com/questions/4971269/
from matplotlib.cm import get_cmap
n = "Accent"
cmap = get_cmap(n) # type: matplotlib.colors.ListedColormap
colors = cmap.colors # type: list
PATH = Path(tempfile.mkdtemp())
os.makedirs(PATH/'png')
os.makedirs(PATH/'csv')
os.makedirs(PATH/'joblib')
par = argparse.ArgumentParser()
par.add_argument('--base', type=str, help='Database to use', default='elec2')
par.add_argument('--nrows', type=int, help='How many samples will be used at most', default=30_000)
par.add_argument('--train', type=int, help='Size of train set', default=300)
par.add_argument('--horizon', type=int, help='Size of horizon set', default=0)
par.add_argument('--test', type=int, help='Size of test window', default=10)
# par.add_argument('--metric', help='Metric to use on base models')
par.add_argument('--metabase_initial_size', type=int, help='Size of initial metabase', default=410)
par.add_argument('--online_size', type=int, help='How many metaexamples to test (online phase)', default=100)
par.add_argument('--offline_size', type=int, help='How many metaexamples to test (offline phase)', default=100)
par.add_argument('--meta_retrain_interval', type=int, help='How many new metaexamples until retraining', default=1)
par.add_argument('--base_retrain_interval', type=int, help='How many new base examples until retraining', default=10)
par.add_argument('--meta_train_window', type=int, help='How many metaexamples to train on', default=300)
par.add_argument('--gamma', type=int,
help='Batch size. Zero means to predict one algorithm to whole window', default=0)
par.add_argument('--is_incremental', type=int, help='To use or not the incremental metamodel', default=0)
par.add_argument('--reverse_models', type=int, help='To use or not reverse models order', default=0)
par.add_argument('--supress_warning', type=int, help='Whether to supress warnings', default=1)
par.add_argument('--choice', type=str, help='Which model will have preference when Tie happens', default='NysSvm')
par.add_argument('--tune', type=int, help='Whether or not to fine tune base models', default=1)
args, rest = par.parse_known_args()
params = Bunch(**args.__dict__)
print(*params.items(), sep='\n')
if args.supress_warning:
A.IGNORE_WARNING()
del args.supress_warning
# args.online_size = 2000
# args.meta_retrain_interval = 1
# args.is_incremental = 0
labelizer = F.partial(A.biggest_labelizer_arbitrary, choice=args.choice)
joblib.dump(labelizer, PATH/'joblib'/'labelizer.joblib')
BASE=Path('csv')
# mapa = {ex.name: ex.experiment_id for ex in mlflow.list_experiments()}
EXPERIMENT_NAME = f'{args.base}_meta'
exp = mlflow.get_experiment_by_name(EXPERIMENT_NAME)
if not exp:
print(f"Criando experimento {EXPERIMENT_NAME} pela primeira vez")
experiment_id = mlflow.create_experiment(name=EXPERIMENT_NAME)
else:
experiment_id = exp.experiment_id
run = mlflow.start_run(experiment_id=experiment_id)
mlflow.log_params(args.__dict__)
META_MODEL='LgbCustomSkWrapper'
MODELS=[
'NysSvm',
'Rf',
]
mlflow.set_tag('meta_model', META_MODEL)
METRIC='acc'
META_METRICS=['acc', 'kappa_custom', 'geometric_mean']
META_RETRAIN_INTERVAL=args.meta_retrain_interval
MT_TRAIN_FEATURES = [
"best_node","elite_nn","linear_discr",
"naive_bayes","one_nn","random_node","worst_node",
"can_cor","cor", "cov","g_mean",
"gravity","h_mean","iq_range","kurtosis",
"lh_trace",
"mad",
"max","mean",
"median",
"min",
"nr_cor_attr","nr_disc","nr_norm","nr_outliers",
"p_trace","range","roy_root","sd","sd_ratio",
"skewness","sparsity","t_mean","var","w_lambda"
]
MT_HOR_FEATURES = []
MT_TEST_FEATURES = []
HP_GRID_LIS = [
{"svm__C": [1,10,100],
"nys__kernel": ['poly', 'rbf', 'sigmoid']
},
{ "max_depth": [3, 5, None],
"n_estimators": [100, 200, 300],
"min_samples_split": scipy.stats.randint(2, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]
}
]
HP_META_MODEL = {
'boosting_type': 'dart',
'learning_rate': 0.01,
'tree_learner': 'feature',
'metric': 'multi_error,multi_logloss',
'objective': 'multiclassova',
'num_class': len(MODELS),
'is_unbalance': True,
'verbose': -1,
'seed': 42,
}
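# Hedged illustration (not part of the original experiment): a params dict like
# HP_META_MODEL is what LightGBM's native training API consumes directly; the
# dataset below is a random placeholder, shown only to make the wiring explicit.
#   toy_set = lightgbm.Dataset(np.random.rand(50, 4),
#                              label=np.random.randint(0, len(MODELS), 50))
#   booster = lightgbm.train(HP_META_MODEL, toy_set, num_boost_round=10)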
if args.reverse_models:
print("reversing..")
MODELS = MODELS[::-1]
HP_GRID_LIS = HP_GRID_LIS[::-1]
mlflow.set_tag('models', MODELS)
mlflow.set_tag('strategy', 'incremental' if args.is_incremental else 'nao-incremental')
mlflow.set_tag('meta-retreinamento', args.meta_retrain_interval)
joblib.dump(HP_META_MODEL, PATH/'joblib'/'hp_meta.joblib')
mlflow.log_params(A.prefixify(HP_META_MODEL, 'metaHp'))
df = pd.read_csv( BASE / f'{args.base}.csv', nrows=args.nrows)
X, y = df.iloc[:, 1:-1].fillna(0), df.iloc[:, -1]
lbe = LabelEncoder()
yt = lbe.fit_transform(y)
# runinfo_lis = mlflow.list_run_infos(EXPERIMENT_ID)
# df_lis = []
# for rinfo in runinfo_lis:
# try:
# df_lis.append(pd.read_csv(
# 'mlruns/{}/{}/artifacts/metabase.csv'.format(
# EXPERIMENT_ID, rinfo.run_id),
# index_col=False)
# )
# except:
# pass
# df_cache = pd.concat(df_lis, axis=1)
# class CacheMtF:
# def extractor(self, df_cache, prefix='tr'):
# test_cols = [i for i in df_cache.columns
# if i.startswith(prefix)]
# df = df_cache[test_cols]
# df = df.rename(lambda x: '_'.join(x.split('_')[1:]), axis=1)
# for mtf in df.apply(lambda x: x.to_dict(), axis=1):
# yield mtf
# def __init__(self, df_cache, prefix):
# self.generator = self.extractor(df_cache, prefix)
# def __call__(self, *args, **kwargs):
# return next(self.generator)
# train_extractor = CacheMtF(df_cache, 'tr')
# test_extractor = CacheMtF(df_cache, 'tes')
rl(M)
rl(A)
train_extractor = F.partial(A.su_extractor, ext=MFE(
features=MT_TRAIN_FEATURES,
random_state=42,
))
horizon_extractor = lambda x: {}
test_extractor = lambda x: {}
meta_model = M.CLF[META_MODEL](
fit_params=HP_META_MODEL,
classes=[m for m in MODELS],
)
models = [
Bunch(name=n, model=M.CLF[n]())
for n in MODELS
]
opt_params = A.random_params.copy()
opt_params['cv'] = args.test
def fun(model, x, y, retrain_window = META_RETRAIN_INTERVAL):
x = x[-retrain_window:]
y = y[-retrain_window:]
model.fit(x, y, incremental=True)
return model
incremental_trainer = fun if args.is_incremental else None
if args.tune:
print("SOME TUNING...")
optmize_data = args.metabase_initial_size * args.test + args.train
for m, hp in zip(models, HP_GRID_LIS):
A.random_tuner(
model=m.model,
params=hp,
opt_params=opt_params,
X=X[:optmize_data], y=yt[:optmize_data],
)
else:
print("NO TUNING AT ALL")
for m in models:
mlflow.sklearn.log_model(m.model, m.name)
#
# - Note: it makes sense to run this once with everything and then just load the result (costs disk space, saves time)
METABASE_INITIAL_SIZE=args.metabase_initial_size
init_params = dict(
meta_model=meta_model,
base_models=models,
base_tuners=[],
train_extractor=train_extractor,
horizon_extractor=horizon_extractor,
test_extractor=test_extractor,
labelizer=labelizer,
scorer=A.accuracy_score,
meta_retrain_interval=META_RETRAIN_INTERVAL,
is_incremental=args.is_incremental,
    incremental_trainer=incremental_trainer,  # note to self: stop being sloppy here
)
fit_params = dict(
X=X,
Y=yt,
meta_window=args.meta_train_window,
train=args.train,
horizon=args.horizon,
test=args.test,
metabase_initial_size=METABASE_INITIAL_SIZE,
)
rl(MS)
FT_HISTORY = []
ms = MS.MetaStream(**init_params)
ms.fit(**fit_params, verbose=True, skip_tune=True);
FT_HISTORY.append(meta_model.lgb.feature_importance())
# In[31]:
# Backup so the online phase can be restarted without regenerating the metabase
meta_x = ms.meta_x.copy()
meta_y = ms.meta_y.copy()
nxtr, nytr = ms.next_x_train.copy(), ms.next_y_train.copy()
nxhr, nyhr = ms.next_y_horizon.copy(), ms.next_y_horizon.copy()
cached_metafeatures = ms.cached_metafeatures.copy()
base_evals = ms._base_evals.copy()
stream = copy.deepcopy(ms.current_stream)
counter_labels = copy.deepcopy(ms._counter_labels)
# # To test with the implementation of
# rl(MS)
# FT_HISTORY = []
# ms2 = MS.MetaStream(**init_params)
# ms2.fit(**fit_params, verbose=True, skip_tune=True);
# # FT_HISTORY.append(meta_model.lgb.feature_importance())
rl(M)
rl(A)
mmetrics_fun = [M.METRICS_CLF[met] for met in META_METRICS]
off_meta_eval = []
off_preds = []
off_targets = []
print("FASE OFFLINE")
mm = M.CLF[META_MODEL](
fit_params=HP_META_MODEL,
classes=[m for m in MODELS],
)
train_idx_lis, test_idx_lis = A.TimeSeriesCVWindows(
n=args.offline_size, train=args.train, test=args.test
)
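# Hedged sketch (an assumption, not the project's A.TimeSeriesCVWindows): a
# time-series CV windowing helper of this kind typically yields contiguous,
# non-shuffled train/test index blocks that slide forward one test window at a
# time, along these lines:
def _example_ts_cv_windows(n, train, test):
    train_windows, test_windows = [], []
    for k in range(n):
        start = k * test
        train_windows.append(np.arange(start, start + train))
        test_windows.append(np.arange(start + train, start + train + test))
    return train_windows, test_windows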
df_meta_x = pd.DataFrame(ms.meta_x)
fnames = df_meta_x.columns
meta_x_off = df_meta_x.values
meta_y_off =pd.Series(ms.meta_y).values
for (train_idx, test_idx) in tqdm(zip(train_idx_lis, test_idx_lis)):
xtrain, ytrain = meta_x_off[train_idx], meta_y_off[train_idx]
xtest, ytest = meta_x_off[test_idx], meta_y_off[test_idx]
mm.fit(pd.DataFrame(xtrain, columns=fnames), ytrain)
predictions = mm.predict(xtest)
off_preds.append(predictions)
off_targets.append(ytest)
off_meta_eval.append(
[m(y_true=ytest,
y_pred=mm.label_encoder.inverse_transform(predictions))
for m in mmetrics_fun]
)
del fnames, df_meta_x, meta_x_off, meta_y_off, mm
print("FIM FASE OFFLINE")
print("gamma:", args.gamma)
FT_HISTORY = []
lis = []
true_lis = []
online_size = args.online_size
predict_lis = []
processed = ms._processed
print("INÍCIO FASE ONLINE")
for i in tqdm(range(1, online_size+1)):
if not ms.current_stream.has_more_samples():
print(f"Acabaram os dados no índice {i}")
break
xtest, ytest = (
i.tolist() for i in ms.current_stream.next_sample(args.test)
)
    # Prediction (meta level)
pred=ms.predict(xtest, sel=args.gamma)
lis.extend(pred)
pre_dict = {
'true': np.array(ytest),
}
    # Prediction at the base level
for m in models:
pre_dict[m.name] = m.model.predict(
xtest
)
predict_lis.append(pre_dict)
try:
ms.update_stream(
xtest,
ytest,
sel=args.gamma,
base_retrain=True,
# verbose=True,
)
true_lis.append(ms.meta_y[-1])
except Exception as e:
print("Acabaram-se os generators")
raise e
break
FT_HISTORY.append(meta_model.lgb.feature_importance())
df_fti = pd.DataFrame(FT_HISTORY, columns=ms.meta_model.lgb.feature_name())
df_fti.to_csv(PATH/'csv'/'df_fti.csv', index=False,)
# Reason: LightGBM works with numeric labels, so we recover the model
# name via the label encoder's inverse transform
lis = ms.meta_model.label_encoder.inverse_transform(
lis
)
joblib.dump(lis, PATH/'joblib'/'meta_predicts.joblib')
joblib.dump(ms.meta_y, PATH/'joblib'/'meta_labels.joblib')
joblib.dump(ms._base_evals, PATH/'joblib'/'base_evals.joblib')
print("FIM FASE ONLINE")
print("DAQUI PARA BAIXO SÃO APENAS DUMPS E PLOTS")
df_base_online_predict = pd.DataFrame(predict_lis)
aux = df_base_online_predict.apply(
lambda x: [accuracy_score(i, x[0]) for i in x],
axis=1,
)
df_online_scores = pd.DataFrame(aux.to_list(), columns=df_base_online_predict.columns)
df_online_scores.to_csv(PATH/'csv'/'df_online_scores.csv', index=False)
def log_meta_offline_metrics(off_meta_eval):
def inner(s):
        mean, std = np.round(np.mean(s), 3), np.round(np.std(s), 3)
res = f"{s.name.capitalize().ljust(16)}: {mean:.3} ± {std:.3}"
print(res)
return mean, std
df_offline_meta_eval = pd.DataFrame(off_meta_eval, columns=META_METRICS)
mts = df_offline_meta_eval.apply(inner)
mts.index = ['mean', 'std']
mts.to_csv(PATH/'csv'/'meta_offline_metrics.csv', index='index')
# Para ler:
# pd.read_csv('offline_metrics.csv', index_col=0)
return mts
log_meta_offline_metrics(off_meta_eval)
def log_meta_online_metrics(y_true, y_pred):
mp = dict()
for mtr, mtr_name in zip(mmetrics_fun, META_METRICS):
mp[mtr_name] = (np.round(mtr(y_true=y_true, y_pred=y_pred), 3))
mp = | pd.Series(mp) | pandas.Series |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import os
from os import listdir
from os.path import isfile, join
import re
from path import Path
import numpy as np
import pandas as pd
from poor_trader import utils
from poor_trader.utils import quotes_range
from poor_trader.config import INDICATORS_OUTPUT_PATH
def _true_range(df_quotes, indices):
cur = df_quotes.iloc[indices[1]]
prev = df_quotes.iloc[indices[0]]
high, low, prev_close = cur.High, cur.Low, prev.Close
a = utils.roundn(high - low, 4)
b = utils.roundn(abs(high - prev_close), 4)
c = utils.roundn(abs(low - prev_close), 4)
return max(a, b, c)
def true_range(df_quotes):
df = pd.DataFrame(index=df_quotes.index)
df['n_index'] = range(len(df_quotes))
_trf = lambda x: _true_range(df_quotes, [int(i) for i in x])
df['true_range'] = df.n_index.rolling(2).apply(_trf)
return df.filter(like='true_range')
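# Illustrative sketch (added note; the OHLC numbers below are hypothetical): the true
# range of a bar is max(high - low, |high - prev_close|, |low - prev_close|), which is
# what _true_range computes for each rolling pair of rows above.
#     >>> bars = pd.DataFrame({'High': [10.5, 11.0], 'Low': [9.8, 9.5],
#     ...                      'Close': [10.0, 10.7]})
#     >>> true_range(bars)['true_range'].iloc[1]   # max(1.5, 1.0, 0.5)
#     1.5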
def SMA(df_quotes, period, field='Close', symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_{}_SMA_{}.pkl'.format(symbol, quotes_range(df_quotes), field, period)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = pd.DataFrame(index=df_quotes.index)
df['SMA'] = df_quotes[field].rolling(period).mean()
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
def STDEV(df_quotes, period, field='Close', symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_{}_STDEV_{}.pkl'.format(symbol, quotes_range(df_quotes), field, period)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = pd.DataFrame(index=df_quotes.index)
df['STDEV'] = df_quotes[field].rolling(period).std()
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
def _ema(i, df_quotes, df_ema, period, field='Close'):
i = [int(_) for _ in i]
prev_ema, price = df_ema.iloc[i[0]], df_quotes.iloc[i[1]]
if pd.isnull(prev_ema.EMA):
return prev_ema.EMA
else:
c = 2. / (period + 1.)
return c * price[field] + (1. - c) * prev_ema.EMA
def EMA(df_quotes, period, field='Close', symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_{}_EMA_{}.pkl'.format(symbol, quotes_range(df_quotes), field, period)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
c = 2./(period + 1.)
df = pd.DataFrame(columns=['EMA'], index=df_quotes.index)
sma = SMA(df_quotes, period, field)
_sma = sma.dropna()
if len(_sma.index.values) == 0:
        print('SMA produced no values; EMA cannot be seeded for this quotes range')
df.loc[_sma.index.values[0], 'EMA'] = _sma.SMA.values[0]
for i in range(1, len(df_quotes)):
prev_ema = df.iloc[i-1]
if pd.isnull(prev_ema.EMA): continue
price = df_quotes.iloc[i]
ema_value = c * price[field] + (1. - c) * prev_ema.EMA
df.loc[df_quotes.index.values[i], 'EMA'] = ema_value
df = utils.round_df(df)
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return df
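# Worked example of the EMA recursion above (hypothetical numbers, not from the module):
# with period=9 the smoothing constant is c = 2/(9+1) = 0.2; the series is seeded with the
# first available SMA value and then each step computes
#     EMA_t = c * Close_t + (1 - c) * EMA_(t-1)
# so EMA_(t-1) = 100.0 and Close_t = 110.0 give EMA_t = 0.2*110.0 + 0.8*100.0 = 102.0.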
def ATR(df_quotes, period=10, symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_ATR_{}.pkl'.format(symbol, quotes_range(df_quotes), period)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df = pd.DataFrame(columns=['ATR'], index=df_quotes.index)
df_true_range = true_range(df_quotes)
for i in range(1+len(df_quotes)-period):
if pd.isnull(df_true_range.iloc[i].true_range): continue
start = i
end = i+period
last_index = end - 1
trs = df_true_range[start:end]
prev_atr = df.iloc[last_index-1].ATR
if pd.isnull(prev_atr):
atr = np.mean([tr for tr in trs.true_range.values])
else:
atr = (prev_atr * (period-1) + df_true_range.iloc[last_index].true_range) / period
df.loc[df_quotes.index.values[last_index], 'ATR'] = atr
if symbol:
if not os.path.exists(outpath.parent):
os.makedirs(outpath.parent)
df.to_pickle(outpath)
return utils.round_df(df)
def atr_channel(df_quotes, top=7, bottom=3, sma=150, symbol=None):
if symbol:
outpath = INDICATORS_OUTPUT_PATH / '{}/{}_atr_channel_{}_{}_{}.pkl'.format(symbol, quotes_range(df_quotes), top, bottom, sma)
if os.path.exists(outpath):
return pd.read_pickle(outpath)
df_top_atr = ATR(df_quotes, period=top, symbol=symbol)
df_bottom_atr = ATR(df_quotes, period=bottom, symbol=symbol)
df_sma = SMA(df_quotes, period=sma, symbol=symbol)
df = | pd.DataFrame(columns=['top', 'mid', 'bottom'], index=df_quotes.index) | pandas.DataFrame |
# coding: utf-8
# In[ ]:
import numpy as np
import numpy.matlib as npml
import pandas as pd
import statistics as st
from copy import deepcopy
import networkx as nx
import simpy
import matplotlib.pyplot as plt
from simplekml import Kml, Style # for graph_kml
import math
import shapely.geometry
import pyproj
import pandas as pd
from opentisim.liquidbulk.hydrogen_defaults import *
from opentisim.liquidbulk.hydrogen_objects import *
import opentisim
# In[ ]:
def cashflow_data_pipe(terminal, element): #(Terminal, element):
"""Place cashflow data in element dataframe
Elements that take two years to build are assign 60% to year one and 40% to year two."""
# years
years = terminal.modelframe
#years = list(range(Terminal.startyear, Terminal.startyear + Terminal.lifecycle))
# capex
capex = element.capex
#capex_material = element.capex_material
# opex
maintenance = element.maintenance
insurance = element.insurance
labour = element.labour
energy = element.energy
#purchaseH2 = element.purchaseH2
#purchase_material = element.purchase_material
# year online
year_online = element.year_online
year_delivery = element.delivery_time
df = pd.DataFrame()
# years
df["year"] = years
# capex
if year_delivery > 1:
df.loc[df["year"] == year_online - 2, "capex"] = 0.6 * capex
df.loc[df["year"] == year_online - 1, "capex"] = 0.4 * capex
else:
df.loc[df["year"] == year_online - 1, "capex"] = capex
#if capex_material:
#df.loc[df["year"] == year_online, "capex_material"] = capex_material
# opex
if maintenance:
df.loc[df["year"] >= year_online, "maintenance"] = maintenance
if insurance:
df.loc[df["year"] >= year_online, "insurance"] = insurance
if labour:
df.loc[df["year"] >= year_online, "labour"] = labour
if energy:
df.loc[df["year"] >= year_online, "energy"] = energy
# if insurance:
# df.loc[df["year"] >= year_online, "purchaseH2"] = purchaseH2
# if labour:
# df.loc[df["year"] >= year_online, "purchase_material"] = purchase_material
df.fillna(0, inplace=True)
element.df = df
return element
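# Illustrative example of the capex split above (hypothetical figures): an element with
# capex=1_000_000, year_online=2030 and delivery_time=2 gets 600_000 booked in 2028 and
# 400_000 in 2029; with delivery_time=1 the full 1_000_000 is booked in 2029. The opex
# rows (maintenance, insurance, labour, energy) are only filled from year_online onwards.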
def cashflow_data(terminal, element): #(Terminal, element):
"""Place cashflow data in element dataframe
Elements that take two years to build are assign 60% to year one and 40% to year two."""
# years
years = terminal.modelframe
#years = list(range(Terminal.startyear, Terminal.startyear + Terminal.lifecycle))
# capex
capex = element.capex
#capex_material = element.capex_material
# opex
maintenance = element.maintenance
insurance = element.insurance
labour = element.labour
fuel = element.fuel
#purchaseH2 = element.purchaseH2
#purchase_material = element.purchase_material
# year online
year_online = element.year_online
year_delivery = element.delivery_time
df = | pd.DataFrame() | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
import rankaggregation as ra
#Get list of all compound-sars-cov-2 viral protein interactions
compound_viral_df = pd.read_csv("../data/COVID-19/sars_cov_2_Compound_Viral_interactions_for_Supervised_Learning_full_metadata.csv",header='infer')
print("Loaded compound viral protein interactions for SARS-COV-2 viral proteins")
print(compound_viral_df.shape)
#For a given viral protein get ranked list of drugs for a particular ML method
def get_ranked_list(df,proteins,rev_drug_info,protein_mapping_dict,ranked_list_proteins):
for i in range(len(proteins)):
#Subset to single sars-cov-2 viral protein
temp_df = df[df["uniprot_accession"]==proteins[i]].copy()
#Order by predictions
temp_df = temp_df.sort_values(by="predictions",ascending=False)
#Subset to the same single sars-cov-2 viral protein
temp_rev_drug_info = rev_drug_info[rev_drug_info["uniprot_accession"]==proteins[i]].copy()
#Merge the two data frames to get compound names
temp2_df = pd.merge(temp_df,temp_rev_drug_info,on=["uniprot_accession","standard_inchi_key"],how='left')
temp2_df.drop_duplicates(inplace=True)
temp2_df = temp2_df.sort_values(by="predictions",ascending=False)
drug_info = temp2_df["compound_name"].values.tolist()
ranked_list_proteins[protein_mapping_dict[proteins[i]]].append(drug_info)
return(ranked_list_proteins)
#Aggregate the ranked list of drugs to get final set of ordered list of drugs
def per_protein_rank(ranked_list_proteins, protein_name):
temp_list = ranked_list_proteins[protein_name]
agg = ra.RankAggregator()
return(agg.average_rank(temp_list))
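# Minimal usage sketch of the aggregation step (hypothetical drug names; it relies only on
# the rankaggregation API already used above):
#     >>> agg = ra.RankAggregator()
#     >>> agg.average_rank([['drugA', 'drugB', 'drugC'],
#     ...                   ['drugB', 'drugA', 'drugC']])
# returns each candidate with an aggregate score based on its mean position across the
# input lists (cf. the 'Drug'/'Overall Weight' columns in the commented-out block below).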
# +
#Use compound_viral_df and results from ML methods to generate ranked list
rf_smiles_predictions = pd.read_csv("../results/rf_LS_Compound_LS_Protein_supervised_sars_cov_2_predictions.csv",header='infer',sep=",")
svm_smiles_predictions = pd.read_csv("../results/svm_LS_Compound_LS_Protein_supervised_sars_cov_2_predictions.csv",header='infer',sep=",")
xgb_smiles_predictions = pd.read_csv("../results/xgb_LS_Compound_LS_Protein_supervised_sars_cov_2_predictions.csv",header='infer',sep=",")
rf_mfp_predictions = pd.read_csv("../results/rf_MFP_Compound_LS_Protein_supervised_sars_cov_2_predictions.csv",header='infer',sep=",")
svm_mfp_predictions = pd.read_csv("../results/svm_MFP_Compound_LS_Protein_supervised_sars_cov_2_predictions.csv",header='infer',sep=",")
xgb_mfp_predictions = pd.read_csv("../results/xgb_MFP_Compound_LS_Protein_supervised_sars_cov_2_predictions.csv",header='infer',sep=",")
cnn_predictions = pd.read_csv("../results/cnn_supervised_sars_cov_2_predictions.csv",header='infer',sep=",")
lstm_predictions = pd.read_csv("../results/lstm_supervised_sars_cov_2_predictions.csv",header='infer',sep=",")
cnn_lstm_predictions = pd.read_csv("../results/cnn_lstm_supervised_sars_cov_2_predictions.csv",header='infer',sep=",")
gat_cnn_predictions = pd.read_csv("../results/gat_cnn_supervised_sars_cov_2_predictions.csv",header='infer',sep=',')
#Get a list of the unique proteins
all_proteins = rf_smiles_predictions["uniprot_accession"].unique()
#Create a dictionary of ranked list based on the 3 protein names
ranked_list_proteins = {}
protein_mapping_dict = {}
for i in range(len(all_proteins)):
protein_fragment=compound_viral_df[compound_viral_df["uniprot_accession"]==all_proteins[i]]["Protein_Fragment"].unique()
protein_fragment=protein_fragment[0]
protein_mapping_dict[all_proteins[i]]=protein_fragment
ranked_list_proteins[protein_fragment]=[]
#Get ranked list for each protein using ML methods except GLM
#ranked_list_proteins = get_ranked_list(rf_smiles_predictions, all_proteins, compound_viral_df, protein_mapping_dict, ranked_list_proteins)
#ranked_list_proteins = get_ranked_list(svm_smiles_predictions,all_proteins,compound_viral_df,protein_mapping_dict,ranked_list_proteins)
ranked_list_proteins = get_ranked_list(xgb_smiles_predictions,all_proteins,compound_viral_df,protein_mapping_dict,ranked_list_proteins)
#ranked_list_proteins = get_ranked_list(rf_mfp_predictions,all_proteins,compound_viral_df, protein_mapping_dict, ranked_list_proteins)
ranked_list_proteins = get_ranked_list(svm_mfp_predictions,all_proteins,compound_viral_df, protein_mapping_dict, ranked_list_proteins)
ranked_list_proteins = get_ranked_list(xgb_mfp_predictions,all_proteins,compound_viral_df, protein_mapping_dict, ranked_list_proteins)
ranked_list_proteins = get_ranked_list(cnn_predictions,all_proteins,compound_viral_df, protein_mapping_dict, ranked_list_proteins)
#ranked_list_proteins = get_ranked_list(lstm_predictions,all_proteins, compound_viral_df,protein_mapping_dict,ranked_list_proteins)
#ranked_list_proteins = get_ranked_list(cnn_lstm_predictions,all_proteins, compound_viral_df, protein_mapping_dict,ranked_list_proteins)
ranked_list_proteins = get_ranked_list(gat_cnn_predictions,all_proteins, compound_viral_df, protein_mapping_dict,ranked_list_proteins)
# +
##Perform rank aggregation per protein: this ranking strategy is not used
#protein_names=[]
#for i in range(len(all_proteins)):
# protein_names.append(protein_mapping_dict[all_proteins[i]])
#print(protein_names)
##Get ranked list for each viral protein
#rankings = per_protein_rank(ranked_list_proteins,protein_names[0])
#rankings_df = pd.DataFrame(rankings,columns=['Drug','Overall Weight'])
#rankings_df['Protein_Fragment']=protein_names[0]
#rankings_df
# -
#Combine predictions to get rankings based on average predictions
def combined_df(df1,df2,df3,df4,df5,protein_id):
temp_df1=df1[df1["uniprot_accession"]==protein_id]
temp_df1=temp_df1.sort_values(by="standard_inchi_key")
temp_df1 = temp_df1.reset_index(drop=True)
temp_df2=df2[df2["uniprot_accession"]==protein_id]
temp_df2=temp_df2.sort_values(by="standard_inchi_key")
temp_df2 = temp_df2.reset_index(drop=True)
temp_df3=df3[df3["uniprot_accession"]==protein_id]
temp_df3=temp_df3.sort_values(by="standard_inchi_key")
temp_df3 = temp_df3.reset_index(drop=True)
temp_df4=df4[df4["uniprot_accession"]==protein_id]
temp_df4=temp_df4.sort_values(by="standard_inchi_key")
temp_df4 = temp_df4.reset_index(drop=True)
temp_df5=df5[df5["uniprot_accession"]==protein_id]
temp_df5=temp_df5.sort_values(by="standard_inchi_key")
temp_df5 = temp_df5.reset_index(drop=True)
final_df=pd.concat([temp_df1.iloc[:,0:3],temp_df2.iloc[:,2],
temp_df3.iloc[:,2],temp_df4.iloc[:,2],
temp_df5.iloc[:,2]],axis=1,join='inner',ignore_index=True)
return(final_df)
#Combine predictions of models and rank based on average predicted pChEMBL values
def get_results_with_pchembl(final_combined_df,rev_drug_info,protein_name):
average_combined_df = final_combined_df.iloc[:,[0,1]].copy()
average_combined_df.columns=["uniprot_accession","standard_inchi_key"]
average_combined_df["avg_predictions"]=final_combined_df.iloc[:,[2,3,4,5,6]].mean(axis=1)
final_output_df = | pd.merge(average_combined_df,rev_drug_info.iloc[:,[4,5,6]],on='standard_inchi_key') | pandas.merge |
""" This file process the IO for the Text similarity index processor """
import math
import os
import pandas as pd
from similarity_processor.similarity_core import get_cosine
from similarity_processor.similarity_core import text_to_vector
import similarity_processor.similarity_logging as cl
LOG = cl.get_logger()
def is_nan(value):
""" Function which identifies the "nan" on empty cells """
try:
return math.isnan(float(value))
except ValueError:
return False
class SimilarityIO:
""" This class is used for IO Processing the text similarity index processing tool.
User input file is fetched here, also intermediate file as well as
the final recommendation creating are tasks for this class """
def __init__(self, file_path, uniq_id, col_int, is_new_text, new_text=None):
"""constructor for SimilarityIO, which initializes the the input variables needed IO
processing """
LOG.info("\nSimilarity_UI \nValues passed:\n")
self.file_path = file_path
LOG.info("Path:%s", str(self.file_path))
self.uniq_id = uniq_id
LOG.info("\nUnique ID Column:%s", str(self.uniq_id))
self.col_int = col_int
LOG.info("\nColumns of Interest:%s", str(self.col_int))
self.is_new_text = is_new_text
self.new_text = new_text
LOG.info("\nNew_text:%s", str(self.new_text))
self.data_frame = None
self.uniq_header = None
def __get_file_path(self):
""" Function used for getting the file path where the results can be stored /
from where input is provided"""
return str(os.path.dirname(self.file_path))
def __get_file_name(self):
""" Function used for getting the input file name which can be further used for naming
the result """
file_path = self.file_path.split("/")
return os.path.splitext(file_path[-1])[0]
def __get_header(self):
""" Function to fetch the header from the inputfile read in the dataframe """
return list(self.data_frame.columns.values)
def __set_uniq_header(self):
""" Function to fetch the unique ID header """
sheet_headers = self.__get_header()
self.uniq_header = sheet_headers[int(self.uniq_id)]
def __get_duplicate_id(self):
""" Function which identifies if any duplicate ID present in the input file """
# List the duplicate ID
__duplicated_list = list(self.data_frame.duplicated())
__du_list = []
__data = [[]]
# Remove the 'NaN' in case of empty cell and filter only IDs
for key, item in enumerate(__duplicated_list):
if item:
__du_list.append(self.data_frame[self.uniq_header][key])
du_list = list(map(lambda x: 0 if is_nan(x) else x, __du_list))
__data = {'Duplicate ID': [nonzero for nonzero in du_list if nonzero != 0]}
# Create DataFrame and write
self.__write_xlsx(pd.DataFrame(__data), "Duplicate_ID")
def __read_to_panda_df(self):
""" Function which read the input data/xlsx to a pandas Data frame """
if not os.path.exists(self.file_path):
LOG.error("\nFile path is invalid")
return False
self.data_frame = pd.read_excel(self.file_path)
if self.data_frame.empty:
LOG.error("\nInput data is incorrect/ file is invalid/"
"It has more than one sheet")
return False
return True
def __get_needed_df_header(self, uniq_id_header, sheet_headers):
""" Function to fetch only the Uniq ID + column of interest as per user input """
self.col_int = list(self.col_int.split(','))
__column_of_interest_header = [sheet_headers[int(i)] for i in self.col_int]
__all_col_int = ",".join(str(potion) for potion in __column_of_interest_header)
return (uniq_id_header + "," + __all_col_int).split(",")
def __refine_df(self):
""" Create/Modify data frame with only needed contents as per user input """
sheet_headers = self.__get_header()
self.data_frame[sheet_headers[int(self.uniq_id)]] = self.data_frame[
sheet_headers[int(self.uniq_id)]].ffill()
self.data_frame = self.data_frame[self.__get_needed_df_header(
sheet_headers[int(self.uniq_id)], sheet_headers)]
def __create_merged_df(self):
""" Merge the text so as to form two column one with unique ID , other with merged
content in steps """
self.data_frame = (self.data_frame.set_index([self.uniq_header])
.apply(lambda x: ' '.join(x.dropna()), axis=1)
.reset_index(name='Steps'))
self.data_frame = self.data_frame.groupby(self.uniq_header)['Steps']\
.apply(' '.join).reset_index()
def __create_mergrd_file(self):
""" Create a copy of the merged content so that user can analyse """
self.__write_xlsx(self.data_frame, "merged_steps")
def __write_xlsx(self, data_f, name):
""" Function which write the dataframe to xlsx """
file_path = os.path.join(self.__get_file_path(), self.__get_file_name() + "_" + name)
# Github open ticket for the abstract method
writer = | pd.ExcelWriter('%s.xlsx' % file_path, engine='xlsxwriter') | pandas.ExcelWriter |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: pandas.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
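    # Example of the INI layout produced above (hypothetical section and columns; note that
    # ConfigParser lower-cases option names by default), with each DataFrame column written
    # as one option whose value is the whole column serialized as a list:
    #     [Unknown]
    #     serial_number = ['S1', 'S2']
    #     temperature = ['35', '37']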
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
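    # Worked example for the weighted-average trick above (hypothetical data): for
    # x = [2, 4, 6], numpy.average with weights of 1/3 each evaluates
    # 2/3 + 4/3 + 6/3 = 4.0, i.e. the ordinary mean, without the explicit division by
    # x.size that the docstring reports as producing inf/NaN in some float128 cases.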
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomaly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomaly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": | pandas.StringDtype() | pandas.StringDtype |
"""
"""
__version__='192.168.3.11.dev1'
import sys
import os
import logging
import pandas as pd
import re
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
logger = logging.getLogger('PT3S')
try:
from PT3S import Rm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Rm - trying import Rm instead ... maybe pip install -e . is active ...'))
import Rm
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
def addResVecToDfAlarmEreignisse(
dfAlarmEreignisse
,TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
):
"""
dfAlarmEreignisse:
Nr: lfd. Nr (gebildet gem. NrBy und NrAsc)
tA: Anfangszeit
tE: Endezeit
tD: Dauer des Alarms
ZHKNR: ZHKNR (die zeitlich 1., wenn der Alarm sich über mehrere ZHKNRn erstreckt)
tD_ZHKNR: Lebenszeit der ZHKNR; x-Annotationen am Anfang/Ende, wenn ZHK beginnt bei Res12-Anfang / andauert bei Res12-Ende; '-1', wenn Lebenszeit nicht ermittelt werden konnte
ZHKNRn: sortierte Liste der ZHKNRn des Alarms; eine davon ist ZHKNR; typischerweise die 1. der Liste
LDSResBaseType: SEG oder Druck
OrteIDs: OrteIDs des Alarms
Orte: Kurzform von OrteIDs des Alarms
Ort: der 1. Ort von Orte
SEGName: Segment zu dem der 1. Ort des Alarms gehört
DIVPipelineName:
Voralarm: ermittelter Vorlalarm des Alarms; -1, wenn kein Voralarm in Res12 gefunden werden konnte
Type: Typ des Kontrollraumns; z.B. p-p für vollständige Flussbilanzen; '', wenn kein Typ gefunden werden konnte
Name: Name des Bilanzraumes
NrSD: lfd. Nr Alarm BaseType
NrName: lfd. Nr Alarm Name
NrSEGName: lfd. Nr Alarm SEGName
AlarmEvent: AlarmEvent-Objekt
BZKat: Betriebszustandskategorie des Alarms
Returns:
dfAlarmEreignisse with 2 Cols added:
resIDBase: die 1. OrtID von OrteIDs
dfResVec: der resVec des Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
dfAlarmEreignisse['resIDBase']=dfAlarmEreignisse['OrteIDs'].apply(lambda x: x[0])
        ### determine the result vector for all locations
dfResVecs={}
dfResVecsLst=[]
for indexAlarm, rowAlarm in dfAlarmEreignisse.iterrows():
resIDBase=rowAlarm['resIDBase']
if resIDBase in dfResVecs.keys():
                # resIDBase already handled
dfResVecsLst.append(dfResVecs[resIDBase])
continue
            # determine the columns based on resIDBase
            ErgIDs=[resIDBase+ext for ext in Rm.ResChannelTypesAll]
            IMDIErgIDs=['IMDI.'+ID for ID in ErgIDs] # each column could be present as IMDI. instead of "normal"
            ErgIDsAll=[*ErgIDs,*IMDIErgIDs]
            # result columns
            if rowAlarm['LDSResBaseType']=='SEG':
                dfFiltered=TCsLDSRes1.filter(items=ErgIDsAll,axis=1)
            else:
                dfFiltered=TCsLDSRes2.filter(items=ErgIDsAll,axis=1)
            # rename the result columns
colDct={}
for col in dfFiltered.columns:
m=re.search(Lx.pID,col)
colDct[col]=m.group('E')
dfFiltered.name=resIDBase
dfResVec=dfFiltered.rename(columns=colDct)
            # remember the result vector
dfResVecs[resIDBase]=dfResVec
dfResVecsLst.append(dfResVec)
logger.debug("{:s}resIDBase: {:50s} Anzahl gefundener Spalten in TCsLDSRes: {:d}".format(logStr, resIDBase, len(dfResVec.columns.to_list())))
dfAlarmEreignisse['dfResVec']=dfResVecsLst
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise e
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse
def fGenAlarmVisTimeSpan(
tA
,tE
    # all of the following values should have the same unit and be integer-valued in that unit
    ,timeSpan=pd.Timedelta('25 Minutes')
    ,timeRoundStr='1T'
    ,timeBoundaryMin=pd.Timedelta('3 Minutes')
    ,timeRef='A' # for alarms that are longer: the start or the end is displayed with timeSpan
):
"""
erzeugt eine Zeitspanne in welcher ein Alarm Zwecks Analyse dargestellt wird
tA, tE sind Anfang und Ende des Alarms
diese werden ab- (tA) bzw. aufgerundet (tE) mit timeRoundStr
zwischen den gerundeten Zeiten und tA/tE soll mindestens timeBoundaryMin liegen
wenn nicht, wird timeBoundaryMin auf tA/tE angewendet und dann wird gerundet
timeSpan ist die gewuenschte minimale Zeitspanne
Alarme die kuerzer sind werden mit timeSpan dargestellt
Alarme die laenger sind: Anfang oder Ende wird mit timeSpan dargestellt
"""
    # round the times down and up
timeStart=tA.floor(freq=timeRoundStr)
timeEnd=tE.ceil(freq=timeRoundStr)
    # check the rounded times against the minimum margin
if tA-timeStart < timeBoundaryMin:
timeStart=tA-timeBoundaryMin
timeStart= timeStart.floor(freq=timeRoundStr)
if timeEnd-tE < timeBoundaryMin:
timeEnd=tE+timeBoundaryMin
timeEnd= timeEnd.ceil(freq=timeRoundStr)
    # check the rounded times against the time span
    timeLeft=timeSpan-(timeEnd-timeStart)
    if timeLeft > pd.Timedelta('0 Seconds'): # the rounded alarm time is shorter than timeSpan; timeSpan is displayed
timeStart=timeStart-timeLeft/2
timeStart= timeStart.floor(freq=timeRoundStr)
timeEnd=timeEnd+timeLeft/2
timeEnd= timeEnd.ceil(freq=timeRoundStr)
else:
        # the rounded alarm time is longer than timeSpan; A or E is displayed with timeSpan
if timeRef=='A':
timeM=tA.floor(freq=timeRoundStr)
else:
timeM=tE.ceil(freq=timeRoundStr)
timeStart=timeM-timeSpan/2
timeEnd=timeM+timeSpan/2
if timeEnd-timeStart > timeSpan:
timeEnd=timeStart+timeSpan
ZeitbereichSel=timeEnd-timeStart
if ZeitbereichSel <= pd.Timedelta('1 Minutes'):
bysecond=list(np.arange(0,60,1))
byminute=None
elif ZeitbereichSel <= pd.Timedelta('3 Minutes'):
bysecond=list(np.arange(0,60,5))
byminute=None
elif ZeitbereichSel > pd.Timedelta('3 Minutes') and ZeitbereichSel <= pd.Timedelta('5 Minutes'):
bysecond=list(np.arange(0,60,15))
byminute=None
elif ZeitbereichSel > pd.Timedelta('5 Minutes') and ZeitbereichSel <= pd.Timedelta('20 Minutes'):
bysecond=list(np.arange(0,60,30))
byminute=None
elif ZeitbereichSel > pd.Timedelta('20 Minutes') and ZeitbereichSel <= pd.Timedelta('30 Minutes'):
bysecond=None
byminute=list(np.arange(0,60,1))
else:
bysecond=None
byminute=list(np.arange(0,60,3))
return timeStart, timeEnd, byminute, bysecond
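# Minimal usage sketch (hypothetical alarm times; default parameters as defined above): a
# 40-second alarm is padded roughly symmetrically to the full 25-minute window,
#     >>> fGenAlarmVisTimeSpan(pd.Timestamp('2021-01-01 10:00:10'),
#     ...                      pd.Timestamp('2021-01-01 10:00:50'))
# yields timeEnd - timeStart == pd.Timedelta('25 Minutes') with bysecond=None and
# byminute=list(np.arange(0, 60, 1)), i.e. one tick per minute.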
def rptAlarms(
pdfErgFile='rptAlarms.pdf'
,figsize=Rm.DINA2q
,dpi=Rm.dpiSize
    ,dfAlarmStatistik=pd.DataFrame() # 1 row per SEG; columns with alarm information for the SEG
    ,dfAlarmEreignisse=pd.DataFrame() # 1 row per alarm; columns with information about the alarm
,TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,TCsLDSIn=pd.DataFrame()
,TCsOPC=pd.DataFrame()
,TCsSIDEvents=pd.DataFrame()
,IDSetsDctAlNr={} # AlNr-Keyed dct with ColIDs
,timeSpanMin={} # AlNr-Keyed dct with Timespan-Para
,QDct={} # AlNr-Keyed dct with QDct-Para
,pDct={} # AlNr-Keyed dct with pDct-Para
,QDctOPC={} # AlNr-Keyed dct with pDct-Para
,pDctOPC={} # AlNr-Keyed dct with pDctOPC-Para
,attrsDct=Rm.attrsDct
    ,plotOnlyAlNrn=None # for test purposes; list of the alarms to report
,*args
,**kwds
):
"""
    # over all segments with alarms
    # all alarms of a SEG one after another, in the order of their numbering (i.e. usually chronological)
    # display each alarm with HYD and LDS in one figure
    # individual figures and a PDF are produced
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
rptAlarmsResults={}
# (gsHYD,gsLDS,pltLDSpQAndEventsResults,pltLDSErgVecResults)
        # PDF - start of the document
pdf=PdfPages(pdfErgFile)
(fileNameBase,ext)= os.path.splitext(pdfErgFile)
        # over all segments
for indexSEG,rowSEG in dfAlarmStatistik.iterrows():
strSEG="LfdNr {:2d} - {!s:3s}: {!s:15s}".format(
indexSEG+1
,rowSEG.DIVPipelineName
,rowSEG['SEGName']
# ,rowSEG['SEGResIDBase']
)
if rowSEG['FörderZeitenAlAnz']==0 and rowSEG['RuheZeitenAlAnz']==0:
logger.info("{:s}: FörderZeitenAlAnz=0 und RuheZeitenAlAnz=0".format(strSEG))
continue
# Segmente mit Alarmen ...
# Alarmnummern
AlNrn=sorted(rowSEG['FörderZeitenAlNrn']+rowSEG['RuheZeitenAlNrn'])
#logger.info("{:s}: AlNrn: {!s:s}".format(strSEG, AlNrn))
            # over all alarms of the SEG
for idxAlarm,AlNr in enumerate(AlNrn):
                # the alarm
s=dfAlarmEreignisse[dfAlarmEreignisse['Nr']==AlNr].iloc[0]
titleStr="{:s}: AlNrn: {!s:s}: AlNr.: {:d} ({:s}: {:s})".format(strSEG, AlNrn, AlNr,s.LDSResBaseType,s.resIDBase)
if plotOnlyAlNrn != None:
if AlNr not in plotOnlyAlNrn:
logger.info("{:s}: nicht in plotOnlyAlNrn ...".format(titleStr))
continue
logger.info(titleStr)
                # its result vector
resIDBase=s.resIDBase
dfResVec=s.dfResVec
# FIG
fig=plt.figure(figsize=figsize,dpi=dpi)
                # SEG or pressure (Druck) alarm
if s.LDSResBaseType=='SEG':
dfSegReprVec=dfResVec
dfDruckReprVec=pd.DataFrame()
else:
dfSegReprVec=pd.DataFrame()
dfDruckReprVec=dfResVec
timeStart, timeEnd, byminute, bysecond = fGenAlarmVisTimeSpan(s.tA,s.tE
,timeSpan=pd.Timedelta('25 Minutes')
,timeRoundStr='1T'
,timeBoundaryMin=pd.Timedelta('1 Minutes')
)
xlims=[(timeStart, timeEnd)]
byminute=byminute
bysecond=bysecond
vAreasX=[[(s.tA,s.tE)]]
gsHYD,gsLDS,pltLDSpQAndEventsResults,pltLDSErgVecResults=Rm.plotTimespans(
xlims=xlims
,sectionTitles=["{:s}".format(s.DIVPipelineName)]
,sectionTitlesLDS=["tA={!s:s}".format(s.tA)]
,byminute=byminute
,bysecond=bysecond
,orientation='portrait'
,vAreasX=vAreasX
,vLinesXLDS=[]
,vAreasXLDS=[]
# --- Args Fct. ---:
,TCsLDSIn=TCsLDSIn
,TCsOPC=TCsOPC
,TCsOPCScenTimeShift=pd.Timedelta('0 seconds')
,TCsSIDEvents=TCsSIDEvents.filter(items=IDSetsDctAlNr[s.Nr]['I'] if s.Nr in IDSetsDctAlNr.keys() else [])
,TCsSIDEventsTimeShift= | pd.Timedelta('0 seconds') | pandas.Timedelta |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
        # Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with | tm.assert_produces_warning(False) | pandas.util.testing.assert_produces_warning |
import numpy as np
import pandas as pd
from yews.cpic import pick
def detects2table(results_dict, wl, g, include_all=False, starttime=None):
    '''
    Converts a dictionary of results from the detect() function to a pandas
    DataFrame. Columns include the start time, end time, p probability and
    s probability for each window. If include_all is True, all windows are
    included in the table (not just those whose detection probability exceeds
    the threshold); the default is False. If the starttime argument is
    specified, the start and end columns contain datetime strings; otherwise
    they contain the number of seconds since the start of the array.
    Inputs:
        results_dict: dictionary of results from the detect function
        wl: window length in seconds
        g: spacing between consecutive window start times, in seconds
        include_all: Boolean
        starttime: obspy UTCDateTime object
    Output:
        df: pandas DataFrame with probabilities of detection for each window
    '''
data = []
cols = ('window start', 'window end', 'p prob', 's prob')
for i in range(len(results_dict['detect_p'])):
if results_dict['detect_p'][i] or results_dict['detect_s'][i] \
or include_all:
# log row in data table
if starttime:
window_start = str(starttime + i*g) # UTCDateTime object
window_end = str(starttime + i*g + wl)
else:
window_start = i*g # time in seconds since start of array
window_end = window_start + wl
p_prob = results_dict['detect_p'][i]
s_prob = results_dict['detect_s'][i]
row_entry = (window_start, window_end, p_prob, s_prob)
data.append(dict(zip(cols, row_entry)))
df = pd.DataFrame(data)
df = df[list(cols)]
return df
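# Illustrative sketch (not part of the original module; values are made up):
# with wl=30 s windows spaced g=5 s apart and include_all=False, detects2table
# returns one row per detected window, e.g.
#   window start  window end  p prob  s prob
#   5.0           35.0        0.93    0.00
#   10.0          40.0        0.88    0.12
# When starttime (an obspy UTCDateTime) is supplied, the first two columns
# hold datetime strings instead of seconds.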
def find_runs_with_gaps(results_dict, max_gap):
'''
    Find runs within results_dict (from the detect function) where the
    detection probability for either p or s is above the probability threshold,
    allowing up to max_gap consecutive 0's between detected windows.
Inputs:
results_dict: dictionary of results from yews detect function
max_gap: max number of consecutive 0's allowable within runs
Output:
run_indices: list of pairs describing start and end of run windows
'''
scan_for_start = True
zero_count = 0
run_indices = []
for i in range(len(results_dict['detect_p'])):
if scan_for_start:
if results_dict['detect_p'][i] or results_dict['detect_s'][i]:
start_index = i
most_recent_nonzero = i
scan_for_start = False
else:
if results_dict['detect_p'][i] or results_dict['detect_s'][i]:
most_recent_nonzero = i
zero_count = 0
else:
if zero_count == max_gap:
run_indices.append([start_index, most_recent_nonzero])
zero_count = 0
scan_for_start = True
else:
zero_count += 1
return run_indices
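# Illustrative sketch (not part of the original module): how max_gap bridges
# short gaps between detections. Assuming per-window probabilities with 0
# where nothing was detected, e.g.
#   detect_p = [0, 0.9, 0.8, 0, 0.7, 0, 0, 0]
#   detect_s = [0, 0,   0,   0, 0,   0, 0, 0]
# find_runs_with_gaps({'detect_p': detect_p, 'detect_s': detect_s}, max_gap=1)
# returns [[1, 4]]: the single zero at index 3 is bridged because it does not
# exceed max_gap, and the trailing zeros close the run at index 4.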
def yield_pick_windows(array, fs, wl, g, results_dict, max_gap, buffer):
'''
Yield windows to run cpic picker over given detection results.
'''
run_indices = find_runs_with_gaps(results_dict, max_gap)
for run in run_indices:
start_index = int(fs*(run[0]*g - buffer))
if start_index < 0:
start_index = 0
end_index = int(fs*(run[1]*g + wl + buffer))
if end_index > (len(array[0]) - 1):
end_index = len(array[0]) - 1
yield start_index, array[:, start_index:end_index]
def generate_picks(array, model, transform, fs, wl, g_detect, g_pick,
results_dict, max_gap, buffer):
p_picks = []
p_confs = []
s_picks = []
s_confs = []
for start_index, window in yield_pick_windows(array, fs, wl, g_detect,
results_dict, max_gap, buffer):
pick_results = pick(window, fs, wl, model, transform, g_pick)
if type(pick_results['p']) == np.ndarray:
for i in range(len(pick_results['p'])):
picktime = pick_results['p'][i]
p_picks.append(start_index/fs + picktime)
p_confs.append(pick_results['p_conf'][i])
if type(pick_results['s']) == np.ndarray:
for i in range(len(pick_results['s'])):
picktime = pick_results['s'][i]
s_picks.append(start_index/fs + picktime)
s_confs.append(pick_results['s_conf'][i])
return {'p picks': p_picks, 'p confs': p_confs, 's picks': s_picks,
's confs': s_confs}
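# Illustrative usage sketch (assumed values, not part of the original script):
# `array` is an (n_channels, n_samples) waveform, `results` is the dictionary
# produced by the upstream detect() step, and `model`/`transform` are the
# trained cpic picker and its preprocessing transform.
#   picks = generate_picks(array, model, transform, fs=100, wl=30.0,
#                          g_detect=5.0, g_pick=1.0, results_dict=results,
#                          max_gap=2, buffer=5.0)
#   # picks['p picks'] / picks['s picks'] hold arrival times in seconds from
#   # the start of `array`; picks['p confs'] / picks['s confs'] hold the
#   # corresponding confidences.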
def picks2table(picks, starttime=None):
data = []
cols = ('phase', 'pick time', 'confidence')
for i in range(len(picks['p picks'])):
phase = 'p'
if starttime:
picktime = str(starttime + picks['p picks'][i])
else:
picktime = picks['p picks'][i]
conf = picks['p confs'][i]
row_entry = (phase, picktime, conf)
data.append(dict(zip(cols, row_entry)))
for i in range(len(picks['s picks'])):
phase = 's'
if starttime:
picktime = str(starttime + picks['s picks'][i])
else:
picktime = picks['s picks'][i]
conf = picks['s confs'][i]
row_entry = (phase, picktime, conf)
data.append(dict(zip(cols, row_entry)))
df = | pd.DataFrame(data) | pandas.DataFrame |
import os, random
import pandas as pd
import re
import sys
import codecs
from shutil import copyfile
from datetime import datetime
def clean_text(text):
text = text.replace("<p>", "").replace("</p>", "\n")
return re.sub('\.+', ".", text)
def filecount(dir):
return len([f for f in os.listdir(dir)])
def main(gnm_articles, article_input_dir, sample_output_dir, articles_file, comments_output_dir, comments_file):
    '''For writing article text with at least one comment to txt files'''
articles_df = pd.read_csv(gnm_articles)
#
comments_df = pd.read_csv(comments_file)
articles_with_comm = list(comments_df['article_id'].unique())
articles_df = articles_df[articles_df['article_id'].isin(articles_with_comm)]
for idx, article in articles_df.iterrows():
date = datetime.strptime(article['published_date'].split()[0], '%Y-%m-%d')
folder_name = article_input_dir + str(date.year)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
file_name = folder_name + "/" + str(article['article_id']) + ".txt"
text_file = codecs.open(file_name, "w", "utf-8")
cleaned_text = clean_text(article['article_text'])
text_file.write(cleaned_text)
text_file.close()
print("Articles with atleast one comment written to files.")
'''Getting sample articles'''
files = set()
count = pd.DataFrame.from_csv(articles_file, index_col=None, header=None)
dir = article_input_dir
output_dir = sample_output_dir
for idx, row in count.iterrows():
folder_name = dir + str(row[0])
num_of_articles = row[1]
num_of_files = filecount(folder_name)
for i in range(num_of_articles):
if num_of_files == len(files):
break
file_name = random.choice(os.listdir(folder_name))
while file_name in files:
file_name = random.choice(os.listdir(folder_name))
files.add(file_name)
year_folder = output_dir + str(row[0])
if not os.path.exists(year_folder):
os.makedirs(year_folder)
source = folder_name + "/" + file_name
copyfile(source, year_folder + "/" + file_name)
'''Getting comments from sampled articles'''
article_ids = | pd.DataFrame(columns=['article_id', 'year']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import warnings as w
import math
from scipy.signal import savgol_filter
from copy import deepcopy, copy
from fO2calculate import core
from fO2calculate import fO2bufferplotter
from fO2calculate import tavern as tv
def calc_dIW_from_fO2(P, T, fO2):
"""Translates from absolute (not Log) fO2 value into number of log units away from the
Iron-Wüstite buffer (dIW).
Parameters
----------
P float
Pressure in bars
T float
Temperature in degrees C
fO2 float
Absolute fO2 value
Returns
-------
float
fO2 in terms of dIW
"""
if fO2 <= 0:
return np.nan
else:
P_GPa = P / 10000
T_K = T + 273.15
log_fO2 = math.log10(fO2)
log_IW_value = fO2bufferplotter.buffers.calc_IW(P_GPa, T_K)
dIW = log_fO2 - log_IW_value
return dIW
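# Worked example (illustrative, values assumed): if the IW buffer at the given
# P and T evaluates to log10(fO2_IW) = -12 and the sample fO2 is 1e-10, then
# dIW = log10(1e-10) - (-12) = +2, i.e. two log units above the buffer.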
def calc_dIW_from_fO2_Francis(T, fO2):
"""Uses Francis McCubbin spreadsheet for equation for IW buffer
Parameters
----------
T float
Temperature in degrees C
fO2 float
Absolute fO2 value
Returns
-------
float
fO2 in terms of dIW
"""
if fO2 <= 0:
return np.nan
else:
T_K = T + 273.15
log_fO2 = math.log10(fO2)
log_IW_value = (2 *
math.log10((math.exp((-275723 +
126.3154 * T_K -
8.11767 * T_K *
math.log(T_K))/
(8.314*T_K)))))
dIW = log_fO2 - log_IW_value
return dIW
def calc_gammas(temp, press, species='all'):
"""Returns fugacity coefficients calculated using the Redlich Kwong Equation of State.
Code derived from http://people.ds.cam.ac.uk/pjb10/thermo/pure.html - <NAME>
30 October 2003.
Parameters
----------
temp: float
Temperature in degrees C.
press: float
Pressure in bars.
species: str
Choose which species to calculate.
        Options are: 'CH4', 'CO', 'CO2', 'H2', 'H2O', 'H2S', 'O2', 'S2', 'SO2', or 'all'.
        If 'all' is passed, a dictionary of values is returned. Default value is 'all'.
Returns
-------
float or dict
Fugacity coefficient for passed species.
If single species is passed, float.
If "all" is passed, dictionary with keys 'CH4', CO', 'CO2', 'H2', 'H2O', 'H2S', 'O2',
'S2', 'SO2'
"""
tempK = temp + 273.15
R = 8.3145
gamma_dict = {}
    # iterate with a separate name so the `species` argument is not clobbered
    for spec in core.fluid_species_names:
        #Calculate a and b parameters (depend only on critical parameters)...
        a = 0.42748 * R**2.0 * core.critical_params[spec]["cT"]**(2.5) / (core.critical_params[spec]["cP"] * 10.0**5)
        b = 0.08664 * R * core.critical_params[spec]["cT"] / (core.critical_params[spec]["cP"] * 10.0**5)
kappa = 0.0
#Calculate coefficients in the cubic equation of state...
#coeffs: (C0, C1, C2, A, B)
A = a * press * 10.0**5 / (math.sqrt(tempK) * (R * tempK)**2.0)
B = b * press * 10.0**5 / (R * tempK)
C2 = -1.0
C1 = A - B - B * B
C0 = -A * B
#Solve the cubic equation for Z0 - Z2, D...
Q1 = C2 * C1 / 6.0 - C0 / 2.0 - C2**3.0 / 27.0
P1 = C2**2.0 / 9.0 - C1 / 3.0
D = Q1**2.0 - P1**3.0
if D >= 0:
kOneThird = 1.0 / 3.0
absQ1PSqrtD = math.fabs(Q1 + math.sqrt(D))
temp1 = absQ1PSqrtD**kOneThird
temp1 *= (Q1 + math.sqrt(D)) / absQ1PSqrtD
absQ1MSqrtD = math.fabs(Q1 - math.sqrt(D))
temp2 = absQ1MSqrtD**kOneThird
temp2 *= (Q1 - math.sqrt(D)) / absQ1MSqrtD
Z0 = temp1 + temp2 - C2 / 3.0
else:
temp1 = Q1**2.0 / (P1**3.0)
temp2 = math.sqrt(1.0 - temp1) / math.sqrt(temp1)
temp2 *= Q1 / math.fabs(Q1)
gamma = math.atan(temp2)
if gamma < 0:
gamma = gamma + math.pi
Z0 = 2.0 * math.sqrt(P1) * math.cos(gamma/3.0) - C2 / 3.0
Z1 = 2.0 * math.sqrt(P1) * math.cos((gamma + 2.0 * math.pi) / 3.0) - C2/3.0
Z2 = 2.0 * math.sqrt(P1) * math.cos((gamma + 4.0 * math.pi) / 3.0) - C2/3.0
if Z0 < Z1:
temp0 = Z0
Z0 = Z1
Z1 = temp0
if Z1 < Z2:
temp0 = Z1
Z1 = Z2
Z2 = temp0
if Z0 < Z1:
temp0 = Z0
Z0 = Z1
Z1 = temp0
#Determine the fugacity coefficient of first root and departure functions...
#calcdepfns(coeffs[3], coeffs[4], paramsab[0], Z[0])
#calcdepfns(A, B, kappa, Z)
#Calculate Departure Functions
gamma = math.exp(Z0 - 1.0 - math.log(Z0-B) - A * math.log(1.0+B/Z0)/B)
Hdep = R * tempK * (Z0 - 1.0 - 1.5*A*math.log(1.0+B/Z0)/B)
Sdep = R * (math.log(Z0-B) - 0.5*A*math.log(1.0+B/Z0)/B)
        gamma_dict[spec] = gamma
    if species == 'all':
return gamma_dict
elif isinstance(species, str):
try:
return gamma_dict[species]
except:
raise core.InputError('Passed species not recognized.')
else:
raise core.InputError('species must be type string.')
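# Illustrative sketch (not part of the original module): calc_gammas returns a
# single Redlich-Kwong fugacity coefficient when one species name is passed,
# or a dict keyed by the names in core.fluid_species_names when 'all' is
# requested. Temperatures are in degrees C and pressures in bars.
#   gamma_CO2 = calc_gammas(1000, 1, species='CO2')
#   all_gammas = calc_gammas(1000, 1, species='all')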
class Fluid(object):
"""
Creates a Fluid object with parameters defined here.
"""
def __init__(self, folder_name, DSC_filename='DSC.txt', masses_filename='masses.txt',
TG_filename='TG.txt', highT=False):
""" Initiates the Fluid class
Parameters
----------
        folder_name : str
            Path to folder containing three text files with DSC, mass, and TG data. This is the
            standard data format for Netzsch gas files.
        DSC_filename : str
            OPTIONAL. Default is 'DSC.txt'. Name of file with DSC data.
        masses_filename : str
            OPTIONAL. Default is 'masses.txt'. Name of file with masses data.
        TG_filename : str
            OPTIONAL. Default is 'TG.txt'. Name of file with TG data.
        highT : float
            OPTIONAL. Default is False. If a float is given, it is used as the minimum
            temperature at which to keep gas data.
"""
# Read in files
dsc_data = pd.read_csv(folder_name + '/' + DSC_filename, delimiter="\t") # DSC file
masses_data = pd.read_csv(folder_name + '/' + masses_filename, delimiter="\t") # masses file
tg_data = pd.read_csv(folder_name + '/' + TG_filename, delimiter="\t") # TG file
# drop last column of masses (should be empty)
masses_data = masses_data.iloc[:, :-1]
# remove white space in column names
dsc_data.columns = dsc_data.columns.str.replace(' ', '')
masses_data.columns = masses_data.columns.str.replace(' ', '')
tg_data.columns = tg_data.columns.str.replace(' ', '')
# coerce all data to numeric
dsc_data = dsc_data.apply(pd.to_numeric, errors='coerce')
masses_data = masses_data.apply(pd.to_numeric, errors='coerce')
tg_data = tg_data.apply(pd.to_numeric, errors='coerce')
# drop all rows with any NaN
dsc_data = dsc_data.dropna()
masses_data = masses_data.dropna()
tg_data = tg_data.dropna()
# delete trailing data from cool-down phase
max_temp_val = masses_data['temp_2'].max()
max_temp_val = max_temp_val//1 # get floor (round down)
masses_data = masses_data.loc[(masses_data['temp_2']<max_temp_val)]
tg_data = tg_data.loc[(tg_data['temp_TG']<max_temp_val)]
dsc_data = dsc_data.loc[(dsc_data['temp_DSC']<max_temp_val)]
# if requested, drop all low temp data
if isinstance(highT, float) or isinstance(highT, int):
minT = highT
masses_data = masses_data.loc[(masses_data['temp_2']>=minT)]
tg_data = tg_data.loc[(tg_data['temp_TG']>=minT)]
dsc_data = dsc_data.loc[(dsc_data['temp_DSC']>=minT)]
self.max_temp_round = int(5 * round(max_temp_val/5)) #save max temp val rounded to nearest 5
# normalize small numbers to large
masses_data['ic_2'] = masses_data['ic_2'].apply(lambda x: x/10**-10)
masses_data['ic_16'] = masses_data['ic_16'].apply(lambda x: x/10**-10)
masses_data['ic_18'] = masses_data['ic_18'].apply(lambda x: x/10**-10)
masses_data['ic_28'] = masses_data['ic_28'].apply(lambda x: x/10**-10)
masses_data['ic_32'] = masses_data['ic_32'].apply(lambda x: x/10**-10)
masses_data['ic_34'] = masses_data['ic_34'].apply(lambda x: x/10**-10)
masses_data['ic_44'] = masses_data['ic_44'].apply(lambda x: x/10**-10)
masses_data['ic_64'] = masses_data['ic_64'].apply(lambda x: x/10**-10)
self.dsc_data = dsc_data
self.masses_data = masses_data
self.tg_data = tg_data
self.highT = highT
# subtract background from all masses we care about
self.masses_data = self._subtract_gas_background()
def get_smoothed_masses(self, window_length=11, polyorder=2):
""" Applies scipy.signal.savgol_filter to all gas data
Parameters
----------
window_length: int
After scipy.signal.savgol_filter. The length of the filter window (i.e., the number of
coefficients). window_length must be a positive odd integer. If mode is ‘interp’,
window_length must be less than or equal to the size of x.
polyorder: int
After scipy.signal.savgol_filter. The order of the polynomial used to fit the samples.
polyorder must be less than window_length.
Returns
-------
pandas DataFrame
"""
_masses_data = self.masses_data.copy()
for i, col_name in enumerate(core.fluid_col_names):
x = _masses_data['temp_2'].tolist()
gas = _masses_data[col_name].tolist()
gas_smooth = savgol_filter(gas, window_length, polyorder)
# drop non_smoothed column values
_masses_data.drop(col_name, axis=1, inplace=True)
# add smoothed values in place of dropped ones
_masses_data[col_name] = gas_smooth
return _masses_data
def _subtract_gas_background(self):
""" Finds minimum cps value for all species and subtracts this from all values in
that species curve.
Returns
-------
pandas DataFrame
"""
_masses_data = self.masses_data.copy()
for i, col_name in enumerate(core.fluid_col_names):
minvalue = _masses_data[col_name].min()
_masses_data[col_name] -= minvalue
return _masses_data
def calc_fO2_from_gas_ratios(self, oxidized_species):
"""
Returns fO2 value of gas calculated using molar ratios of redox couples.
Parameters
----------
        oxidized_species : str
Name of the oxidized species in the redox couple. Currently can take one of 'CO2'
or 'H2O'. 'CO2' will perform calculation on reaction: CO + 1/2O2 = CO2. 'H2O' will
perform calculation on reaction: H2 + 1/2O2 = H2O.
Returns
-------
pandas DataFrame
With temperature and computed fO2 values
"""
        if oxidized_species == 'CO2':
reduced_species = 'CO'
temp_col = 'temp_28'
ox_mass_col = 'ic_44'
red_mass_col = 'ic_28'
ox_MW = 44.01
red_MW = 28
        elif oxidized_species == 'H2O':
reduced_species = 'H2'
temp_col = 'temp_2'
ox_mass_col = 'ic_18'
red_mass_col = 'ic_2'
ox_MW = 18.02
red_MW = 2.02
else:
raise core.InputError("oxidized_species must be one of 'CO2' or 'H2O'.")
# Calculate absolute fO2 values
fO2_vals = []
temps = []
for index, row in self.masses_data.iterrows():
K_F = tv.calc_Ks(row[temp_col], species=oxidized_species)
gamma_oxidized = calc_gammas(row[temp_col], press=1, species=oxidized_species)
gamma_reduced = calc_gammas(row[temp_col], press=1, species=reduced_species)
ox_moles = row[ox_mass_col] #* ox_MW
red_moles = row[red_mass_col] #* red_MW
molar_ratio = ox_moles / red_moles
fO2_vals.append((molar_ratio * gamma_oxidized/gamma_reduced * 1/K_F)**2)
temps.append(row[temp_col])
abs_data = pd.DataFrame({'temp': temps, 'fO2': fO2_vals})
# Calculate dIW values as well
dIW_vals = []
for index, row in abs_data.iterrows():
dIW_vals.append(calc_dIW_from_fO2_Francis(row['temp'], row['fO2']))
return_data = abs_data.copy()
return_data['dIW'] = dIW_vals
return return_data
def calc_fCO(self):
""" Calculate fCO as:
fCO = KF * (XCH4/XH2)*(XH2O/XH2**2) * gammaCH4 * gammaH2O / gammaH2**3
Derivation:
-----------
CH4 + H2O = CO + 3H2
KF = XCO*gammaCO * (XH2*gammaH2)**3 / XCH4*gammaCH4 * XH2O*gammaH2O
KF = fCO * (XH2*gammaH2)**3 / XCH4*gammaCH4 * XH2O*gammaH2O
KF = fCO * (XH2/XCH4) * (gammaH2/gammaCH4) * (XH2**2/XH2O) * (gammaH2**2/gammaH2O)
KF = fCO * (XH2/XCH4) * (XH2**2/XH2O) * (gammaH2**3/gammaCH4*gammaH2O)
fCO = KF * (XCH4/XH2) * (XH2O/XH2**2) * gammaCH4 * gammaH2O / gammaH2**3
Returns
-------
pandas DataFrame
With temperature and computed fCO values
"""
fCO_vals = []
temp_vals = []
for index, row in self.masses_data.iterrows():
temp = row['temp_16']
K_F = tv.calc_Ks(temp, species='CH4')
gammaH2 = calc_gammas(temp, press=1, species='H2')
gammaCH4 = calc_gammas(temp, press=1, species='CH4')
gammaH2O = calc_gammas(temp, press=1, species='H2O')
temp_vals.append(temp)
fCO = (K_F * (row['ic_16']/row['ic_2']) *
(row['ic_18']/(row['ic_2']**2)) *
gammaCH4 * gammaH2O / (gammaH2**3))
fCO_vals.append(fCO)
return_data = pd.DataFrame({'temp': temp_vals, 'fCO': fCO_vals})
return return_data
def calc_fCO_alt(self, fO2_how='CO2'):
""" Calculate fCO as:
fCO = KF * sqrt(fO2) * (XCH4/XH2**2) * (gammaCH4/gammaH2**2)
Derivation:
-----------
CH4 + 1/2O2 = CO + 2H2
KF = (fCO * XH2**2*gammaH2**2)/(XCH4*gammaCH4 * sqrt(fO2))
KF = fCO * (1/sqrt(fO2)) * (XH2**2/XCH4) * (gammaH2**2)/(gammaCH4)
fCO = KF * sqrt(fO2) * (XCH4/XH2**2) * (gammaCH4/gammaH2**2)
Parameters:
-----------
fO2_how: str
Method to calculate fO2. Default is 'CO2', which uses calc_fO2_from_gas_ratios('CO2').
Can also be 'H2O', which uses calc_fO2_from_gas_ratios('H2O').
Returns
-------
pandas DataFrame
With temperature and computed fCO values
"""
# get fO2 values
fO2_data = self.calc_fO2_from_gas_ratios(fO2_how)
# copy over masses_data
_masses_data = self.masses_data.copy()
# add fO2 values to masses_data
data = | pd.concat([_masses_data, fO2_data], axis=1) | pandas.concat |
import pandas as pd
import glob
import os
import numpy as np
import time
import fastparquet
import argparse
from multiprocessing import Pool
import multiprocessing as mp
from os.path import isfile
parser = argparse.ArgumentParser(description='Program to run google compounder for a particular file and setting')
parser.add_argument('--data', type=str,
help='location of the pickle file')
# don't use this for now
parser.add_argument('--word', action='store_true',
help='Extracting context for words only?')
parser.add_argument('--output', type=str,
help='directory to save dataset in')
args = parser.parse_args()
with open('/mnt/dhr/CreateChallenge_ICC_0821/no_ner_0_50000.txt','r') as f:
contexts=f.read().split("\n")
contexts=contexts[:-1]
def left_side_parser(df): # N N _ _ _
cur_df=df.copy()
try:
cur_df[['modifier','head','w1','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def mid1_parser(df): # _ N N _ _
cur_df=df.copy()
try:
cur_df[['w1','modifier','head','w2','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df=pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context')
head_df=head_df.loc[head_df.context.isin(contexts)]
return compound_df,modifier_df,head_df
def mid2_parser(df): # _ _ N N _
cur_df=df.copy()
try:
cur_df[['w1','w2','modifier','head','w3']]=cur_df.lemma_pos.str.split(' ',expand=True)
except ValueError:
compound_df=pd.DataFrame()
modifier_df=pd.DataFrame()
head_df=pd.DataFrame()
return compound_df,modifier_df,head_df
compound_df=pd.melt(cur_df,id_vars=['modifier','head','year','count'],value_vars=['w1','w2','w3'],value_name='context')
compound_df=compound_df.loc[compound_df.context.isin(contexts)]
modifier_df=pd.melt(cur_df,id_vars=['modifier','year','count'],value_vars=['head','w1','w2','w3'],value_name='context')
modifier_df=modifier_df.loc[modifier_df.context.isin(contexts)]
head_df= | pd.melt(cur_df,id_vars=['head','year','count'],value_vars=['modifier','w1','w2','w3'],value_name='context') | pandas.melt |
from django.shortcuts import render
from django.views.generic import FormView, UpdateView
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from common.mixins import JSONResponseMixin, AdminUserRequiredMixin
from common.utils import get_object_or_none
from common.tools.data_fit import iter_regression4allxy
from common.tools.data_confidenc import show_all_confids
from django import forms as oforms
from .. import forms
from django.views import View
import codecs
import xlrd
import xlwt
import uuid
import datetime
import pandas as pd
import os
from io import StringIO, BytesIO
import re
import copy
# Create your views here.
global gdatas
# filename: pandas DataFrame of the loaded data
gdatas = {}
# relationships between the data
global rdatas
rdatas = {}
# confidence measures of the data
global cdatas
cdatas = {}
global ccollist
ccollist=[]
# fitted results of the data
global fdatas
fdatas = {}
def btUrldecode(urldata, colnames):
trim = dict()
trim['query'] = dict()
    # pagination slice
    trim['start'] = int(urldata.get('offset', 0)) # offset position
    entries = urldata.get('limit', 25) # number of entries shown per page
    trim['offset'] = int(trim['start']) + int(entries) # end position of the slice (start + page size)
    # sorting
    order_type = urldata.get('orderType', 'asc')
    order_col = urldata.get('orderName', 'pk')
    trim['orderName'] = order_col if order_type == u'asc' else u'-' + order_col
    # search each table column
for colname in colnames:
tqur = urldata.get("search_{}".format(colname))
if tqur:
trim['query'][colname] = tqur
return trim
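# Illustrative sketch (assumed request shape, not part of the original view):
# for a bootstrap-table request such as
#   ?offset=25&limit=25&orderType=desc&orderName=time&search_time=2021
# btUrldecode(request.GET, ['time', 'value']) yields
#   {'query': {'time': '2021'}, 'start': 25, 'offset': 50, 'orderName': '-time'}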
def index(request):
return render(request, 'data_analy/index.html')
# @login_required
def data_index(request):
"数据 原始输出 表头"
context = {}
context["collist"] = []
if len(gdatas.keys()) > 0:
tclist = gdatas[list(gdatas.keys())[0]].columns
context["collist"] = tclist
return render(request, 'data_analy/data_index.html', context)
def data_list(request):
"数据 原始输出 内容"
if len(gdatas.keys()) > 0:
tpd = gdatas[list(gdatas.keys())[0]]
qd = btUrldecode(request.GET, tpd.columns)
        # filter + sort
outjson = data_list_core(tpd, qd)
outjson['_'] = request.GET.get('_', 0)
return JsonResponse(outjson)
else:
return JsonResponse({})
def data_list_core(inpandas, qd):
"数据筛选排序通用核心: 数据frame, 请求json"
# 筛选
newtpd = copy.deepcopy(inpandas)
newtpd = newtpd.applymap(str)
indlist = set(newtpd.index)
for coln in inpandas.columns:
if coln in qd['query']:
tind = newtpd[coln].str.find(qd['query'][coln])
tind = tind[tind > -1].index
indlist = indlist & set(tind)
indlist = sorted(indlist)
totals = len(indlist)
    # take the pandas data from before the str conversion
newtpd = inpandas.iloc[indlist, :]
    # sort and paginate
if qd['orderName'] != "pk":
if qd['orderName'].startswith("-"):
qd['orderName'] = qd['orderName'].lstrip("-")
newtpd = newtpd.sort_values(by=[qd['orderName']], ascending=[False])
else:
newtpd = newtpd.sort_values(by=[qd['orderName']])
newtpd = newtpd.iloc[qd['start']:qd['offset'], :]
return {
'total': totals,
'data': query2dict(newtpd),
}
def prob_check_v(request):
"指标 汇总输出 表头"
context = {}
# 1. 列名 2. 平稳性 3.
context["collist"] = ["names", "mean", "std"]
return render(request, 'data_analy/prob_check_index.html', context)
def data_prob_check(request):
"指标 汇总输出 内容"
if len(rdatas.keys()) > 0:
tpd = rdatas[list(rdatas.keys())[0]]
return JsonResponse({
'total': tpd.shape[0],
'data': query2dict(tpd),
'_': request.GET.get('_', 0)
})
else:
return JsonResponse({})
def relation_v(request):
"相关性 汇总输出 表头"
context = {}
# 1. 关系对名字 2.
context["collist"] = ["names", "a c", "b c"]
return render(request, 'data_analy/relation_index.html', context)
def data_relation(request):
"相关性 汇总输出 内容"
if len(rdatas.keys()) > 0:
tpd = rdatas[list(rdatas.keys())[0]]
return JsonResponse({
'total': tpd.shape[0],
'data': query2dict(tpd),
'_': request.GET.get('_', 0)
})
else:
return JsonResponse({})
def confidence_v(request):
"置信度 汇总输出 表头"
context = {}
context["collist"] = []
if len(cdatas.keys()) > 0:
ccollist = cdatas[list(gdatas.keys())[0]].columns
context["collist"] = ccollist
return render(request, 'data_analy/confidence_index.html', context)
def data_confidence(request):
"置信度 汇总输出 内容"
if len(gdatas.keys()) > 0:
# if len(cdatas) > 0:
# ttnewtpd = cdatas[list(gdatas.keys())[0]]
# else:
tpd = gdatas[list(gdatas.keys())[0]]
tprob = request.GET.get("reply_prob")
tposit = request.GET.get("reply_posit")
if tposit == "":
tposit = None
else:
tposit = float(tposit)
if tprob == "":
tprob = 0.95
else:
tprob = float(tprob)
showjson = show_all_confids(tpd, prob=tprob, posit=tposit)
cdatas[list(gdatas.keys())[0]] = pd.DataFrame(showjson)
ttnewtpd = cdatas[list(gdatas.keys())[0]]
ccollist =ttnewtpd.columns
qd = btUrldecode(request.GET, ccollist)
outjson = data_list_core(ttnewtpd, qd)
outjson['_'] = request.GET.get('_', 0)
# print(outjson)
return JsonResponse(outjson)
else:
return JsonResponse({})
def fit_v(request):
"拟合 汇总输出 表头"
context = {}
context["collist"] = []
if len(fdatas.keys()) > 0:
tclist = fdatas[list(gdatas.keys())[0]].columns
context["collist"] = tclist
return render(request, 'data_analy/fit_index.html', context)
def data_fit(request):
"拟合 汇总输出 内容"
if len(gdatas.keys()) > 0:
if len(fdatas) > 0:
ttnewtpd = fdatas[list(gdatas.keys())[0]]
else:
            # generate the fit information if it has not been computed yet
tpd = gdatas[list(gdatas.keys())[0]]
showjson = iter_regression4allxy(tpd, max_combnum=2, test_size=0.2)
fdatas[list(gdatas.keys())[0]] = | pd.DataFrame(showjson) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Python Script related to:
Deep Neural Network model to predict the electrostatic parameters in the polarizable classical Drude oscillator force field
<NAME>, <NAME>, <NAME> and <NAME>.
Copyright (c) 2022, University of Maryland Baltimore
"""
import numpy as np
import pandas as pd
from tensorflow import keras
from collections import OrderedDict
def load_train_charge():
charge_fea_train=pd.read_pickle('dgenff_dataset.2021/train_charge_feature.pkl')
charge_target_train=pd.read_pickle('dgenff_dataset.2021/train_charge_target.pkl')
train_charge_dataset=charge_fea_train.iloc[:,1:].values
train_charge_target=charge_target_train.iloc[:,1].values
train_charge_molid=np.array(charge_fea_train.index)
train_charge_atomid=charge_fea_train.iloc[:,0].values
return train_charge_molid,train_charge_atomid,train_charge_dataset,train_charge_target
def load_test_charge():
charge_fea_test=pd.read_pickle('dgenff_dataset.2021/test_charge_feature.pkl')
charge_target_test=pd.read_pickle('dgenff_dataset.2021/test_charge_target.pkl')
test_charge_dataset=charge_fea_test.iloc[:,1:].values
test_charge_target=charge_target_test.iloc[:,1].values
test_charge_molid=np.array(charge_fea_test.index)
test_charge_atomid=charge_fea_test.iloc[:,0].values
return test_charge_molid,test_charge_atomid,test_charge_dataset,test_charge_target
def load_train_pol():
alphathole_fea_train=pd.read_pickle('dgenff_dataset.2021/train_alphathole_feature.pkl')
alphathole_target_train=pd.read_pickle('dgenff_dataset.2021/train_alphathole_target.pkl')
train_alphathole_dataset=alphathole_fea_train.iloc[:,1:].values
train_alpha_target=alphathole_target_train.iloc[:,1].values
train_thole_target=alphathole_target_train.iloc[:,2].values
train_alphathole_molid=np.array(alphathole_fea_train.index)
train_alphathole_atomid=alphathole_fea_train.iloc[:,0].values
return train_alphathole_molid,train_alphathole_atomid,train_alphathole_dataset,train_alpha_target,train_thole_target
def load_test_pol():
alphathole_fea_test=pd.read_pickle('dgenff_dataset.2021/test_alphathole_feature.pkl')
alphathole_target_test= | pd.read_pickle('dgenff_dataset.2021/test_alphathole_target.pkl') | pandas.read_pickle |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = | Series([False, False, False]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 29 09:20:13 2021
@author: bw98j
"""
import prose as pgx
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn as sns
import numpy as np
import itertools
import glob
import os
import random
from tqdm import tqdm
import scipy.stats
import gtfparse
import itertools
from pylab import *
import collections
from sklearn.preprocessing import StandardScaler
import pickle
from sklearn.decomposition import PCA
import umap
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix, f1_score, roc_curve, auc
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from operator import itemgetter
#plot parameters
plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.it'] = 'Arial:italic'
plt.rcParams['mathtext.rm'] = 'Arial'
plt.rc('font',family='arial',size=40)
plt.rc('hatch',linewidth = 2.0)
#%%
conv=pd.read_csv('databases/ensembl_uniprot_conversion.tsv',
sep='\t',
comment='#',
)
conv = conv.rename(columns={'ID':'gene',
'Entry': 'uniprot'})
conv = conv[['gene','uniprot']]
conv = dict(zip(conv.gene,conv.uniprot))
validGenes = conv.keys() #set of genes with associated protein names
tpm = pd.read_csv('klijn_rna_seq/E-MTAB-2706-query-results.tpms.tsv', sep='\t',comment='#')
tpm.columns = [i.split(', ')[-1] for i in tpm.columns]
tpm = tpm.fillna(0)
tpm['protein'] = tpm.apply(lambda x: conv[x['Gene ID']] if x['Gene ID'] in validGenes else np.nan, axis=1)
tpm = tpm.dropna()
hela = tpm[['HeLa','protein']].set_index('protein')
#%%
ibaq = pd.read_csv('klijn_rna_seq/bekker_jensen_2017_ibaq_s3_mmc4.csv', skiprows=2)
ibaq = ibaq[['Protein IDs','Median HeLa iBAQ']]
ibaq['Protein IDs'] = ibaq.apply(lambda x: list(set([i.split('-')[0] for i in x['Protein IDs'].split(';')])),axis=1)
ibaq['matches'] = ibaq.apply(lambda x: len(x['Protein IDs']),axis=1)
ibaq = ibaq[ibaq.matches == 1]
ibaq['Protein IDs'] = ibaq.apply(lambda x: x[0][0], axis=1)
ibaq = ibaq.set_index('Protein IDs').drop(columns=['matches'])
ibaq = ibaq.dropna().drop_duplicates()
ibaq = np.log10(ibaq)
ibaq = ibaq[~ibaq.index.duplicated(keep='first')]
#%% Get HeLa DDA protein lists
with open('interim_files/HeLa_DDA_sample.pkl', 'rb') as handle:
testdata = pickle.load(handle)
#%%
panel_corr = | pd.read_csv('interim_files/klijn_panel_spearmanCorr.tsv', sep='\t',index_col=0) | pandas.read_csv |
from distutils.util import execute
import sqlite3
from datetime import datetime, timedelta, tzinfo
from importlib import resources
from pathlib import Path
from sqlite3.dbapi2 import OperationalError, Row
from typing import Any, Dict, List
import numpy as np
import pandas as pd
from DailyData import __version__
from DailyData.io.timelog_io import TimelogIO
from DailyData.time_management.recorded_activity import RecordedActivity
from dateutil import tz
from pandas.core import series
SCHEMA = 'schema.sql'
def apply_tz(row: pd.Series,
offset_col_name='timezone_offset',
tzname_col_name='timezone_name',
time_col_name='time'
) -> datetime:
if np.isfinite(row[offset_col_name]):
return apply_tz_single(row, offset_col_name, tzname_col_name, time_col_name)
else:
return row[time_col_name].tz_localize(tz.tzlocal())
def apply_tz_single(row,
offset_col_name='timezone_offset',
tzname_col_name='timezone_name',
time_col_name='time'
):
tz_inst = tz.tzoffset(row[tzname_col_name],
timedelta(seconds=row[offset_col_name]))
non_naive_datetime = tz_inst.fromutc(
row[time_col_name].replace(tzinfo=tz_inst))
return non_naive_datetime
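# Illustrative sketch (not part of the original module): apply_tz is meant to
# be applied row-wise to a frame holding naive UTC timestamps plus the stored
# offset/name columns, e.g.
#   frame['time'] = frame.apply(apply_tz, axis=1)
# It localizes each timestamp with its recorded offset via apply_tz_single and
# falls back to the local timezone when no offset was stored.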
def dict_factory(cursor: sqlite3.Cursor, row):
d = {}
for i, col in enumerate(cursor.description):
d[col[0]] = row[i]
return d
class DatabaseWrapper(TimelogIO):
"""
A Handler that performs common timelog operations on a sqlite3 database.
"""
def __init__(self, db_path: Path = None):
if not db_path:
db_path = ':memory:'
self.db = sqlite3.connect(
db_path, detect_types=sqlite3.PARSE_DECLTYPES)
self.db.row_factory = sqlite3.Row
n_tables = self.db.execute(
'SELECT COUNT(name) FROM sqlite_master WHERE type="table" AND name NOT LIKE "sqlite_%";').fetchone()[0]
try:
if n_tables == 0 or self.db.execute('SELECT version FROM metadata').fetchone()[0] != __version__:
# Run the schema script to create/update the tables if they do not exist, or are not the same version as the current program.
print('Updating database')
self.run_schema()
except (
sqlite3.OperationalError,
TypeError
)as err:
if isinstance(err, TypeError) or 'no such table: metadata' in ';'.join(err.args):
print('Updating database')
self.run_schema()
def __exit__(self, ex_type, ex_val, ex_tb):
self.db.close()
# Re-raise any exceptions
if ex_val is not None:
return False
def new_user(self, user: str):
self.db.execute('INSERT INTO user VALUES (:usr)', {'usr': user})
self.db.commit()
def new_activity(self, activity: str, parent: str = None, is_alias: bool = None):
if parent is not None:
if not self.db.execute('SELECT * FROM activity WHERE name=:parent', {'parent': parent}).fetchone():
raise ValueError(
'Parent activity {} does not exist'.format(parent))
elif not isinstance(parent, str):
raise TypeError('parent must be a string')
if is_alias is not None and parent is None:
raise ValueError(
'Activity cannot be an alias if there is no parent')
elif is_alias is not None and not isinstance(is_alias, bool):
raise TypeError('is_alias must be a boolean')
self.db.execute('INSERT INTO activity VALUES (:act, :parent, :alias)',
{'act': activity,
'parent': parent,
'alias': is_alias})
self.db.commit()
def get_activity_or_parent(self, activity):
activity_exists = self.db.execute(
'SELECT * FROM activity WHERE name=:activity',
{'activity': activity}).fetchone()
if not activity_exists:
return None
if activity_exists['parent'] is not None and activity_exists['alias']:
return activity_exists['parent']
else:
return activity
def record_time(self, activity: str, user: str, timestamp: datetime, backdated=False):
super().record_time(activity, user, timestamp, backdated)
last = self.get_last_record()
insert_cmd = '''INSERT INTO timelog (time, timezone_name, timezone_offset, activity, user, backdated)
VALUES(:time, :tz_name, :tz_offset, :act, :user, :backdated);
'''
# Make sure any values given with pandas datatypes can be recorded
if isinstance(timestamp, pd.Timestamp):
timestamp = timestamp.to_pydatetime()
old_act = activity
activity = self.get_activity_or_parent(activity)
if activity is None:
raise ValueError(
'Activity {} not found'.format(old_act))
self.db.execute(insert_cmd, {
# Convert the time to UTC if there is timezone information
'time': timestamp - (timestamp.tzinfo.utcoffset(timestamp) if timestamp.tzinfo else timedelta(0)),
'tz_name': timestamp.tzinfo.tzname(timestamp) if timestamp.tzinfo else None,
'tz_offset': timestamp.tzinfo.utcoffset(timestamp).total_seconds() if timestamp.tzinfo else None,
'activity': activity,
'act': activity,
'user': user,
'backdated': backdated
})
self.db.commit()
return last
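    # Illustrative usage sketch (assumed values, not part of the original
    # module):
    #   db = DatabaseWrapper(Path('timelog.sqlite'))
    #   db.new_user('alice')
    #   db.new_activity('work')
    #   db.record_time('work', 'alice', datetime.now(tz.tzlocal()))
    # The timestamp is stored as UTC together with its offset and zone name so
    # that get_timestamps can re-localize it later.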
def get_timestamps(self, earliest: datetime, latest: datetime) -> pd.DataFrame:
columns = ['time', 'timezone_offset',
'timezone_name', 'activity']
cmd = '''SELECT :cols FROM timelog WHERE time >= :min AND time < :max
'''.replace(':cols', ', '.join(columns))
old_row_factory = self.db.row_factory
self.db.row_factory = None
fetch = self.db.execute(cmd, {
'min': earliest,
'max': latest
}).fetchall()
self.db.row_factory = old_row_factory
if len(fetch) == 0:
return pd.DataFrame(columns=['time', 'activity'])
frame = | pd.DataFrame(fetch, columns=columns) | pandas.DataFrame |
import numpy as np
import cvxpy as cp
import pandas as pd
from scoring import *
# %%
def main():
year = int(input('Enter Year: '))
week = int(input('Enter Week: '))
budget = int(input('Enter Budget: '))
source = 'NFL'
print(f'Source = {source}')
df = read_data(year=year, week=week, source=source)
df = get_costs(df)
lineup, proj_pts, cost = get_optimal_lineup(df, budget)
print('---------- \n Lineup: \n', lineup)
print('---------- \n Projected Points: \n', proj_pts)
print(f'--------- \n Cost={cost}, Budget={budget}, Cap Room={budget-cost}')
return
def read_data(year, week, source):
POS = 'QB RB WR TE K DST'.split()
d = {'QB': scoring_QB,
'RB': scoring_RB,
'WR': scoring_WR,
'TE': scoring_TE,
'K': scoring_K,
'DST': scoring_DST}
player_dfs = {}
for pos in POS:
filepath = f'../data/{year}/{week}/{pos}/'
df = pd.read_csv(filepath+source+'.csv')
df = d[pos](df)
player_dfs[pos] = df
df = pd.concat(player_dfs).reset_index(drop=True)
    df = df.join(pd.get_dummies(df['pos']))
# -*- coding: utf-8 -*-
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
import nose
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas.lib import Timestamp
from pandas.compat import StringIO
class UsecolsTests(object):
def test_raise_on_mixed_dtype_usecols(self):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
msg = ("The elements of 'usecols' must "
"either be all strings, all unicode, or all integers")
usecols = [0, 'b', 2]
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(data), usecols=usecols)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# see gh-5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_usecols_index_col_False(self):
# see gh-9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_usecols_index_col_conflict(self):
# see gh-4201: test that index_col as integer reflects usecols
data = 'a,b,c,d\nA,a,1,one\nB,b,2,two'
expected = DataFrame({'c': [1, 2]}, index=Index(
['a', 'b'], name='b'))
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'b': ['a', 'b'], 'c': [1, 2], 'd': ('one', 'two')})
expected = expected.set_index(['b', 'c'])
df = self.read_csv(StringIO(data), usecols=['b', 'c', 'd'],
index_col=['b', 'c'])
tm.assert_frame_equal(expected, df)
def test_usecols_implicit_index_col(self):
# see gh-2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# see gh-2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv(StringIO(data), delim_whitespace=True,
usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_integer_like_header(self):
data = """2,0,1
1000,2000,3000
4000,5000,6000
"""
usecols = [0, 1] # column selection by index
expected = DataFrame(data=[[1000, 2000],
[4000, 5000]],
columns=['2', '0'])
df = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['0', '1'] # column selection by name
expected = DataFrame(data=[[2000, 3000],
[5000, 6000]],
columns=['0', '1'])
df = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates(self):
# See gh-9755
s = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), usecols=[0, 2, 3],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(s), usecols=[3, 0, 2],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates_and_full_names(self):
# See gh-9755
s = """0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
names = list('abcde')
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), names=names,
usecols=[0, 2, 3],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(s), names=names,
usecols=[3, 0, 2],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates_and_usecol_names(self):
# See gh-9755
s = """0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
names = list('acd')
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), names=names,
usecols=[0, 2, 3],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(s), names=names,
usecols=[3, 0, 2],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
def test_usecols_with_unicode_strings(self):
# see gh-13219
s = '''AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'AAA': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'BBB': {0: 8, 1: 2, 2: 7}
}
expected = DataFrame(data)
        df = self.read_csv(StringIO(s), usecols=[u'AAA', u'BBB'])
# -*- coding: utf-8 -*-
"""
@file:maketrain.py
@time:2019/5/6 16:42
@author:Tangj
@software:Pycharm
@Desc
"""
import pandas as pd
import numpy as np
import gc
import time
name = ['log_0_1999', 'log_2000_3999', 'log_4000_5999','log_6000_7999', 'log_8000_9999', 'log_10000_19999',
'log_20000_29999', 'log_30000_39999','log_40000_49999',
'log_50000_59999','log_60000_69999','log_70000_79999','log_80000_89999','log_90000_99999',
'log_100000_109999','log_110000_119999','log_120000_129999','log_130000_139999']
def group_split(list_values):
new_values = []
for values in list_values:
vals = values.split(',')
for i in vals:
if i not in new_values:
new_values.append(i)
new_values.sort()
if 'all' in new_values:
return 'all'
str_val = new_values[0]
flag = 1
for i in new_values:
if flag == 1:
str_val = str(i)
flag = 0
else:
str_val = str_val + ',' + str(i)
return str_val
def putting_time_process(put_time):
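    """
    (Docstring added for clarity; behaviour inferred from the code below.)
    OR together one or more delivery-time bitmask values, where each value is an
    integer whose 48 bits mark the half-hour slots of a day.
    Returns (combined integer, 48-character binary string, number of active slots).
    """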
bi_val = [0] * 48
for time in put_time:
time = int(time)
bi_time = bin(time)
j = 0
num = len(bi_time) - 1
while num > 1:
bi_val[j] += int(bi_time[num])
num -= 1
j += 1
n = 47
flag = 1
times = '0'
total = 0
while n >= 0:
if bi_val[n] >= 1:
val = 1
total += 1
else:
val = 0
if flag == 1:
flag = 0
times = str(val)
else:
times = times + str(val)
n -= 1
re_times1 = int(times, 2)
return re_times1, times, total
def disstatus(train, option):
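    """
    (Docstring added for clarity; behaviour inferred from the code below.)
    Drop training rows recorded while the ad was switched off: pair consecutive
    off/on timestamps from the status-change operations (changeField == 1) and
    remove the rows whose 'statime' falls inside each off interval.
    """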
print("status processing")
distime = []
opstatus = option[option['changeField'] == 1]
opstatus.index = opstatus['statime']
opstatus.sort_index()
opstatus.index = range(opstatus.shape[0])
values = opstatus['changeValue']
optime = opstatus['statime'].values
flag = 1
j = 0
for i in values:
if (i == '0') & (flag == 1):
distime.append(optime[j])
flag = 0
if (i == '1') & (flag == 0):
distime.append(optime[j])
flag = 1
j += 1
j = 0
if len(distime) == 0:
return train
elif (len(distime) % 2 == 0):
for i in range(int(len(distime) / 2)):
Train = pd.DataFrame()
t1 = distime[j]
t2 = distime[j + 1]
# print(t1)
# print(t2)
j += 2
train1 = train[train['statime'] < t1]
# print(train1['Reqday'].unique())
Train = pd.concat([Train, train1])
train1 = train[train['statime'] > t2]
# print(train1['Reqday'].unique())
Train = pd.concat([Train, train1])
train = Train
else:
t1 = distime[-1]
train = train[train['statime'] < t1]
Train = pd.DataFrame()
for i in range(int(len(distime) / 2)):
Train = pd.DataFrame()
t1 = distime[j]
t2 = distime[j + 1]
j += 2
train1 = train[train['statime'] < t1]
Train = pd.concat([Train, train1])
train2 = train[train['statime'] > t2]
Train = pd.concat([Train, train2])
train = Train
# print(train.shape)
del Train
gc.collect()
return train
def initValue(train, operate):
print("initing processing")
ope = operate[operate['optionType'] == 2]
    # Initialize bid
print("initing bid")
inb = ope[ope['changeField'] == 2]['changeValue']
if inb.shape[0] == 0:
train.loc[:, 'adBid'] = 88
else:
inbid = '-1'
for i in inb:
inbid = i
break
train.loc[:, 'adBid'] = int(inbid)
    # Initialize audience targeting
print("initing peo")
train.loc[:, 'age'] = 'all'
train.loc[:, 'gender'] = 'all'
train.loc[:, 'area'] = 'all'
train.loc[:, 'status'] = 'all'
train.loc[:, 'education'] = 'all'
train.loc[:, 'consuptionAbility'] = 'all'
train.loc[:, 'device'] = 'all'
train.loc[:, 'work'] = 'all'
train.loc[:, 'connectionType'] = 'all'
train.loc[:, 'behavior'] = 'all'
if ope[ope['changeField'] == 3].shape[0] != 0:
inpeo = ope[ope['changeField'] == 3]['changeValue'].values[0]
peofea = inpeo.split("|")
for fea in peofea:
l = fea.split(':')
if (len(l) < 2):
continue
feas = l[1].split(',')
feas.sort()
if (feas is None):
continue
flags = 1
feature = '0'
for i in feas:
if (flags == 1):
feature = str(i)
flags = 0
continue
feature = feature + ',' + str(i)
# feature = str(feas)
if l[0].lower() == 'age':
if (len(feas) < 100):
# print(feature)
train.loc[:, 'age'] = feature
if l[0].lower() == 'gender':
# print(feature)
train.loc[:, 'gender'] = feature
if l[0].lower() == 'area':
# print(feature)
train.loc[:, 'area'] = feature
if l[0].lower() == 'status':
# print(feature)
train.loc[:, 'status'] = feature
if l[0].lower() == 'education':
# print(feature)
train.loc[:, 'education'] = feature
if l[0].lower() == 'consuptionability':
# print(feature)
train.loc[:, 'consuptionAbility'] = feature
if l[0].lower() == 'os':
# print(feature)
train.loc[:, 'device'] = feature
if l[0].lower() == 'work':
# print(feature)
train.loc[:, 'work'] = feature
if l[0].lower() == 'connectiontype':
# print(feature)
train.loc[:, 'connectionType'] = feature
if l[0].lower() == 'behavior':
# print(feature)
train.loc[:, 'behavior'] = feature
    # Initialize delivery time slots
inti = ope[ope['changeField'] == 4]['changeValue'].values[0]
putting = inti.split(',')
len_inti = ope[ope['changeField'] == 4].shape[0]
# print(putting)
if (len(putting) != 7) or (len_inti == 0):
        train.loc[:, 'puttingTime'] = '281474976710655'
else:
train.loc[train['week'] == 0, 'puttingTime'] = putting[0]
train.loc[train['week'] == 1, 'puttingTime'] = putting[1]
train.loc[train['week'] == 2, 'puttingTime'] = putting[2]
train.loc[train['week'] == 3, 'puttingTime'] = putting[3]
train.loc[train['week'] == 4, 'puttingTime'] = putting[4]
train.loc[train['week'] == 5, 'puttingTime'] = putting[5]
train.loc[train['week'] == 6, 'puttingTime'] = putting[6]
return train
def changeBid(train, operate):
print("changebid processing")
option = operate[operate['optionType'] == 1]
opbid = option[option['changeField'] == 2]
if opbid.shape[0] == 0:
return train
opbid.index = opbid['statime']
opbid.sort_index()
opbid.index = range(opbid.shape[0])
values = opbid['changeValue']
optime = opbid['statime'].values
j = 0
for ti in optime:
Train = pd.DataFrame()
train1 = train[train['statime'] <= ti]
# print(ti)
# print(train1['Reqday'].unique())
Train = pd.concat([Train, train1])
train2 = train[train['statime'] > ti]
# print(train2['Reqday'].unique())
train2.loc[:, 'adBid'] = int(values[j])
# print(train2.shape)
        Train = pd.concat([Train, train2])
import pandas as pd
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
import os
import joblib
import json, codecs
import numpy as np
from sklearn.cross_decomposition import PLSRegression
from datetime import date
import Classes.Configurations as cfg
from Classes import Configurations
def import_excel_data():
try:
root = tk.Tk()
root.withdraw()
messagebox.showinfo("Import data (Excel file)", "Choose your matrix of inputs.")
file_path = filedialog.askopenfilename()
file_name = os.path.basename(file_path)
labels = pd.read_excel(file_path, 0)
label_df = pd.DataFrame(labels)
xdata = pd.read_excel(file_path, 1)
x_df = pd.DataFrame(xdata)
ydata = pd.read_excel(file_path, 2)
y_df = pd.DataFrame(ydata)
return label_df, x_df, y_df, file_name
except:
print("An exception occurred while importing excel file.")
def import_excel_data_single():
try:
root = tk.Tk()
root.withdraw()
messagebox.showinfo("Import data (Excel file)", "Choose your matrix of inputs.")
file_path = filedialog.askopenfilename()
file_name = os.path.basename(file_path)
data = pd.read_excel(file_path, 0)
data_df = pd.DataFrame(data)
return data_df
except:
print("An exception occurred while importing excel file.")
def import_excel_data_epo():
try:
root = tk.Tk()
root.withdraw()
messagebox.showinfo("Import data (Excel file)", "Choose your matrix of inputs.")
file_path = filedialog.askopenfilename()
file_name = os.path.basename(file_path)
labels = pd.read_excel(file_path, 0)
label_df = pd.DataFrame(labels)
xdatau = pd.read_excel(file_path, 1)
x_df_u = pd.DataFrame(xdatau)
xdatas = pd.read_excel(file_path, 2)
x_df_s = pd.DataFrame(xdatas)
ydata = pd.read_excel(file_path, 3)
y = pd.DataFrame(ydata)
return label_df, x_df_u, x_df_s, y, file_name
except:
print("An exception occurred while importing excel file.")
def data_to_excel(file_name, all_df, outliers_df):
path = os.path.expanduser("~/Desktop") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls', '')
if not os.path.exists(path):
os.makedirs(path)
writer = pd.ExcelWriter(path + '\Mahalanobis_report.xlsx')
all_data = pd.DataFrame(all_df)
all_data.to_excel(writer, 'Selected data')
stats_all = pd.DataFrame(all_data.describe())
stats_all.to_excel(writer, 'Stats selected data')
outliers_data = pd.DataFrame(outliers_df)
outliers_data.to_excel(writer, 'Outliers data')
stats_outliers = pd.DataFrame(outliers_data.describe())
stats_outliers.to_excel(writer, 'Stats Outliers data')
writer.save()
def sigma_data_to_excel(file_name, all_df):
path = os.path.expanduser("~/Desktop") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls', '')
if not os.path.exists(path):
os.makedirs(path)
writer = pd.ExcelWriter(path + '\Sigma_report.xlsx')
all_data = pd.DataFrame(all_df)
all_data.to_excel(writer, 'Selected data')
stats_all = pd.DataFrame(all_data.describe())
stats_all.to_excel(writer, 'Stats selected data')
writer.save()
def summary_data_to_excel(df, file_name):
path = os.path.expanduser("~/Desktop") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls', '')
if not os.path.exists(path):
os.makedirs(path)
writer = pd.ExcelWriter(path + '\PLS_Report.xlsx')
all_data = pd.DataFrame(df)
all_data.to_excel(writer, 'PLS SUMMARY')
writer.save()
def summary_outlier_to_excel(df, file_name):
path = os.path.expanduser("~/Desktop") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls', '')
if not os.path.exists(path):
os.makedirs(path)
writer = pd.ExcelWriter(path + '\Outliers_Report.xlsx')
all_data = pd.DataFrame(df)
all_data.to_excel(writer, 'OUTLIERS SUMMARY')
writer.save()
def data_describe_to_excel(df, df_outliers, file_name):
path = os.path.expanduser("~/Desktop") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls', '')
if not os.path.exists(path):
os.makedirs(path)
writer = pd.ExcelWriter(path + '\PCA_stats_report.xlsx')
df_describe = pd.DataFrame(df.describe())
df_describe.to_excel(writer, 'Descriptive statistics inliers')
df_describe_out = pd.DataFrame(df_outliers.describe())
df_describe_out.to_excel(writer, 'Descriptive statistics outliers')
writer.save()
def save_model_to_pkl(model, model_name, file_name):
path = os.path.expanduser("~/Desktop") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls', '') + '/PKL'
if not os.path.exists(path):
os.makedirs(path)
file_name_pls_final = os.path.join(path, str(model_name) + '.pkl')
joblib.dump(model, file_name_pls_final, protocol=2)
def save_model_to_json(model, model_name, file_name, df_model_resume, df_y_resume):
path = os.path.expanduser("~/Desktop") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls', '') + '/JSON'
if not os.path.exists(path):
os.makedirs(path)
file_name_jsonfy = os.path.join(path, str(model_name) + '.json')
coef = np.array(model.coef_).tolist()
x_mean = np.array(model.x_mean_).tolist()
x_std = np.array(model.x_std_).tolist()
y_mean = np.array(model.y_mean_).tolist()[0]
coef_list = []
for value in coef:
coef_list.append(value[0])
train_resume = []
    df_y_resume = pd.DataFrame(df_y_resume)
from bs4 import BeautifulSoup
import os
import pandas as pd
HTML_FOLDER = 'yeet/'
file_names = os.listdir(HTML_FOLDER)
final_list = [] # TODO: find some other solution other than a global variable
def extract_data(soup):
'''
Function that takes in a single HTML file and returns a DataFrame with the required data
:param soup:
'''
global final_list
for company in soup.div.children:
if company['class'] != ['ad_campaign_search']: # A div in the list is not a company. Something related to ads.
# We'll skip that div
company_name = company.contents[0].div.string
as_code = company.contents[0]['href'][1:]
as_site = "https://www.agencyspotter.com/" + as_code
# company_site = company.contents[1].div.contents[1].a['href']
company_employees = company.contents[2].div.div.contents[1].lstrip()
company_location = company.contents[3].string
company_id = company.contents[3]['data-id']
company_row = [company_id, company_name, company_employees, company_location, as_site, as_code] # single row of data
final_list.append(company_row) # at the end of the loop, we'll have all agencies info from a single HTML page
# final_list = pd.DataFrame()
for file in file_names:
with open(HTML_FOLDER + file) as fp:
soup = BeautifulSoup(fp, "lxml")
print("Parsing " + file)
extract_data(soup)
headings = ['id', 'name', 'employee strength', 'location', 'agency spotter website', 'agency spotter code']
df = pd.DataFrame(final_list, columns=headings)
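# One possible answer to the TODO above (an untested sketch, not part of the original
# script): return the rows from the parser instead of mutating the module-level list.
def extract_data_rows(soup):
    """Collect one row per company div and return them, avoiding the global."""
    rows = []
    for company in soup.div.children:
        if company['class'] != ['ad_campaign_search']:
            as_code = company.contents[0]['href'][1:]
            rows.append([
                company.contents[3]['data-id'],                     # id
                company.contents[0].div.string,                     # name
                company.contents[2].div.div.contents[1].lstrip(),   # employee strength
                company.contents[3].string,                         # location
                "https://www.agencyspotter.com/" + as_code,         # agency spotter website
                as_code,                                            # agency spotter code
            ])
    return rows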
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 15:26:36 2020
@author: <NAME>
"""
import pandas as pd
import numpy as np
data_ME = pd.read_csv("Portfolios_Formed_on_ME_monthly_EW.csv", index_col = 0)
def returns_anual(data_col):
returns = data_col / 100
n_months = returns.shape[0]
returns_annualized = (returns + 1).prod()**(12/n_months)
return returns_annualized
lo_20 = returns_anual(data_ME['Lo 20'])
hi_20 = returns_anual(data_ME['Hi 20'])
def vol_anual(data_col, n =12):
returns = data_col / 100
vol_annualized = returns.std()*np.sqrt(n)
return vol_annualized
vol_lo_20 = vol_anual(data_ME['Lo 20'])
vol_hi_20 = vol_anual(data_ME['Hi 20'])
data_ME.index = pd.to_datetime(data_ME.index, format="%Y%m")
data_ME.index = data_ME.index.to_period("M")
lo_20_date = returns_anual(data_ME['1999':'2015']['Lo 20'])
hi_20_date = returns_anual(data_ME['1999':'2015']['Hi 20'])
vol_lo_20_date = vol_anual(data_ME['1999':'2015']['Lo 20'])
vol_hi_20_date = vol_anual(data_ME['1999':'2015']['Hi 20'])
def drawdown(data_col):
rets = data_col / 100
wealth_index = 1000*(1 + rets).cumprod()
previous_peak = wealth_index.cummax()
drawdown = (wealth_index - previous_peak) / previous_peak
return drawdown
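# Drawdown at time t is (W_t - max_{s<=t} W_s) / max_{s<=t} W_s for the wealth index W,
# so the most negative value over a window is the maximum drawdown reported below.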
drawdown_lo20 = drawdown(data_ME['Lo 20'])
max_drawdown = drawdown_lo20['1999':'2015'].min()
time_md = drawdown_lo20['1999':'2015'].idxmin()
print(time_md)
month_max_drawdown = drawdown(data_ME['1999':'2015']['Lo 20']).idxmin()
drawdown_hi20 = drawdown(data_ME['Hi 20'])
max_drawdown_hi = drawdown_hi20['1999':'2015'].min()
time_md_hi = drawdown_hi20['1999':'2015'].idxmin()
print(time_md_hi)
month_max_drawdown_hi = drawdown(data_ME['1999':'2015']['Hi 20']).idxmin()
edhec_data = pd.read_csv("edhec-hedgefundindices.csv", index_col = 0)
hfi = edhec_data
hfi.index = pd.to_datetime(hfi.index, format="%d/%m/%Y")
hfi.index = hfi.index.to_period('M')
def semideviation(data):
is_negative = data < 0
return data[is_negative].std(ddof = 0)
semi_indexs = semideviation(hfi['2009':'2018'])
max_semid_index = semi_indexs.idxmax()
min_semid_index = semi_indexs.idxmin()
hfi_analysis = hfi['2000':]
def skewness(r):
"""
Alternative to scipy.stats.skew()
Computes the skewness of the supplied Series or DataFrame
Returns a float or a Series
"""
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**3).mean()
return exp/sigma_r**3
def kurtosis(r):
"""
Alternative to scipy.stats.kurtosis()
Computes the kurtosis of the supplied Series or DataFrame
Returns a float or a Series
"""
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**4).mean()
return exp/sigma_r**4
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
"""
Returns the Parametric Gauusian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
"""
# compute the Z score assuming it was Gaussian
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0))
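# Quick sanity check on synthetic data (illustrative only, not part of the assignment):
# for a roughly normal sample the Cornish-Fisher VaR stays close to the plain Gaussian VaR.
_demo_rets = pd.Series(np.random.RandomState(0).normal(0.01, 0.05, 1000))
_demo_var_plain = var_gaussian(_demo_rets, level=5)
_demo_var_cf = var_gaussian(_demo_rets, level=5, modified=True)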
def var_historic(r, level=5):
"""
Returns the historic Value at Risk at a specified level
i.e. returns the number such that "level" percent of the returns
fall below that number, and the (100-level) percent are above
"""
if isinstance(r, pd.DataFrame):
return r.aggregate(var_historic, level=level)
elif isinstance(r, pd.Series):
return -np.percentile(r, level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
var_modified = var_gaussian(hfi_analysis["Distressed Securities"], level = 1, modified = True)
historical_var = var_historic(hfi_analysis["Distressed Securities"], level = 1)
ind = pd.read_csv("data/ind30_m_vw_rets.csv", header=0, index_col=0)/100
ind.index = pd.to_datetime(ind.index, format="%Y%m")
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
import pandas as pd
from pandas import (Index, Series, _np_version_under1p9)
from pandas.tseries.index import Timestamp
from pandas.types.common import is_integer
import pandas.util.testing as tm
from .common import TestData
class TestSeriesQuantile(TestData, tm.TestCase):
def test_quantile(self):
from numpy import percentile
q = self.ts.quantile(0.1)
self.assertEqual(q, percentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
self.assertEqual(q, percentile(self.ts.valid(), 90))
# object dtype
q = Series(self.ts, dtype=object).quantile(0.9)
self.assertEqual(q, percentile(self.ts.valid(), 90))
# datetime64[ns] dtype
dts = self.ts.index.to_series()
q = dts.quantile(.2)
self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(.25)
self.assertEqual(q, pd.to_timedelta('24:00:00'))
# GH7661
result = Series([np.timedelta64('NaT')]).sum()
self.assertTrue(result is pd.NaT)
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assertRaisesRegexp(ValueError, msg):
self.ts.quantile(invalid)
def test_quantile_multi(self):
from numpy import percentile
qs = [.1, .9]
result = self.ts.quantile(qs)
expected = pd.Series([percentile(self.ts.valid(), 10),
percentile(self.ts.valid(), 90)],
index=qs, name=self.ts.name)
tm.assert_series_equal(result, expected)
dts = self.ts.index.to_series()
dts.name = 'xxx'
result = dts.quantile((.2, .2))
expected = Series([Timestamp('2000-01-10 19:12:00'),
Timestamp('2000-01-10 19:12:00')],
index=[.2, .2], name='xxx')
tm.assert_series_equal(result, expected)
result = self.ts.quantile([])
expected = pd.Series([], name=self.ts.name, index=Index(
[], dtype=float))
tm.assert_series_equal(result, expected)
def test_quantile_interpolation(self):
# GH #10174
if _np_version_under1p9:
raise nose.SkipTest("Numpy version is under 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
self.assertEqual(q, percentile(self.ts.valid(), 10))
q1 = self.ts.quantile(0.1)
self.assertEqual(q1, percentile(self.ts.valid(), 10))
# test with and without interpolation keyword
self.assertEqual(q, q1)
def test_quantile_interpolation_dtype(self):
# GH #10174
if _np_version_under1p9:
raise nose.SkipTest("Numpy version is under 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='lower')
self.assertEqual(q, percentile(np.array([1, 3, 4]), 50))
self.assertTrue(is_integer(q))
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='higher')
self.assertEqual(q, percentile(np.array([1, 3, 4]), 50))
self.assertTrue(is_integer(q))
def test_quantile_interpolation_np_lt_1p9(self):
# GH #10174
if not _np_version_under1p9:
raise nose.SkipTest("Numpy version is greater than 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
self.assertEqual(q, percentile(self.ts.valid(), 10))
q1 = self.ts.quantile(0.1)
self.assertEqual(q1, percentile(self.ts.valid(), 10))
# interpolation other than linear
expErrMsg = "Interpolation methods other than "
with tm.assertRaisesRegexp(ValueError, expErrMsg):
self.ts.quantile(0.9, interpolation='nearest')
# object dtype
with tm.assertRaisesRegexp(ValueError, expErrMsg):
q = Series(self.ts, dtype=object).quantile(0.7,
interpolation='higher')
def test_quantile_nan(self):
# GH 13098
s = pd.Series([1, 2, 3, 4, np.nan])
result = s.quantile(0.5)
expected = 2.5
self.assertEqual(result, expected)
# all nan/empty
cases = [Series([]), Series([np.nan, np.nan])]
for s in cases:
res = s.quantile(0.5)
self.assertTrue(np.isnan(res))
res = s.quantile([0.5])
tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))
res = s.quantile([0.2, 0.3])
tm.assert_series_equal(res, pd.Series([np.nan, np.nan],
index=[0.2, 0.3]))
def test_quantile_box(self):
cases = [[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
[pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
[pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')],
# NaT
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'), pd.NaT],
[pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern'), pd.NaT],
[pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days'), pd.NaT]]
for case in cases:
s = pd.Series(case, name='XXX')
res = s.quantile(0.5)
self.assertEqual(res, case[1])
res = s.quantile([0.5])
exp = pd.Series([case[1]], index=[0.5], name='XXX')
tm.assert_series_equal(res, exp)
def test_datetime_timedelta_quantiles(self):
# covers #9694
self.assertTrue(pd.isnull(Series([], dtype='M8[ns]').quantile(.5)))
self.assertTrue(pd.isnull(Series([], dtype='m8[ns]').quantile(.5)))
def test_quantile_nat(self):
res = Series([pd.NaT, pd.NaT]).quantile(0.5)
self.assertTrue(res is pd.NaT)
res = Series([pd.NaT, pd.NaT]).quantile([0.5])
tm.assert_series_equal(res, pd.Series([pd.NaT], index=[0.5]))
def test_quantile_empty(self):
# floats
s = Series([], dtype='float64')
res = s.quantile(0.5)
self.assertTrue(np.isnan(res))
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# int
s = Series([], dtype='int64')
res = s.quantile(0.5)
self.assertTrue(np.isnan(res))
res = s.quantile([0.5])
        exp = Series([np.nan], index=[0.5])
from preprocessing.utils_communes import (
build_and_clean_df,
label_encoders_generator,
encode_df,
decode_df,
)
import pandas as pd
from preprocessing.preprocessing import (
standardize_education_level,
standardize_date,
standardize_tailmen,
standardize_bool_hors_nk,
standardize_moughataa_commune_float,
)
from config.preprocessing import preprocessing_FSMS_files_with_yields_types
from pathlib import Path
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import BaggingClassifier
feature_cols = [
"numquest",
"ident",
"enqu",
"wilaya",
"numen",
"hors nk",
"Tailmen",
"Nb_hom",
"Nb_fem",
"TxDep",
"Equiv_ad",
"moughataa",
"commune",
]
def impute_communes(features=feature_cols, aggregated_file=None):
if aggregated_file is None:
aggregated_file = (
str(Path.home()) + "/last_drive_version_standardized_aggregated_dataset.csv"
)
    df_aggregated_file = pd.read_csv(aggregated_file, low_memory=False)
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn import metrics
from log import logger
def split_train_test(x, y, test_size, seed):
idx_norm = y == 0
idx_out = y == 1
n_f = x.shape[1]
del_list = []
for i in range(n_f):
if np.std(x[:, i]) == 0:
del_list.append(i)
if len(del_list) > 0:
        logger.info("Pre-process: deleted %d features because they have the same value for every instance" % len(del_list))
x = np.delete(x, del_list, axis=1)
# keep outlier ratio, norm is normal out is outlier
if seed == -1:
rs = None
else:
rs = seed
x_train_norm, x_test_norm, y_train_norm, y_test_norm = train_test_split(x[idx_norm], y[idx_norm],
test_size=test_size,
random_state=rs)
x_train_out, x_test_out, y_train_out, y_test_out = train_test_split(x[idx_out], y[idx_out],
test_size=test_size,
random_state=rs)
x_train = np.concatenate((x_train_norm, x_train_out))
x_test = np.concatenate((x_test_norm, x_test_out))
y_train = np.concatenate((y_train_norm, y_train_out))
y_test = np.concatenate((y_test_norm, y_test_out))
# Standardize data (per feature Z-normalization, i.e. zero-mean and unit variance)
# scaler = StandardScaler().fit(x_train)
# x_train = scaler.transform(x_train)
# x_test = scaler.transform(x_test)
# Scale to range [0,1]
minmax_scaler = MinMaxScaler().fit(x_train)
x_train = minmax_scaler.transform(x_train)
x_test = minmax_scaler.transform(x_test)
return x_train, y_train, x_test, y_test
def semi_setting(x_train, y_train, ratio_known_outliers, seed):
outlier_indices = np.where(y_train == 1)[0]
n_outliers = len(outlier_indices)
if seed == -1:
rng = np.random.RandomState(None)
else:
rng = np.random.RandomState(seed)
n_known_outliers = int(round(n_outliers * ratio_known_outliers))
known_idx = rng.choice(outlier_indices, n_known_outliers, replace=False)
new_y_train = np.zeros(x_train.shape[0], dtype=int)
new_y_train[known_idx] = 1
return new_y_train
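def _demo_split_and_semi_setting():
    # Hedged usage sketch on synthetic data (not part of the original pipeline):
    # 200 points with 10 outliers, a 20% test split, and 10% of training outliers labelled.
    rng = np.random.RandomState(0)
    x = rng.normal(size=(200, 5))
    y = np.zeros(200, dtype=int)
    y[rng.choice(200, 10, replace=False)] = 1
    x_train, y_train, x_test, y_test = split_train_test(x, y, test_size=0.2, seed=42)
    semi_labels = semi_setting(x_train, y_train, ratio_known_outliers=0.1, seed=42)
    return x_train, semi_labels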
def get_sorted_index(score, order='descending'):
'''
:param score:
:return: index of sorted item in descending order
e.g. [8,3,4,9] return [3,0,2,1]
'''
score_map = []
size = len(score)
for i in range(size):
score_map.append({'index':i, 'score':score[i]})
if order == "descending":
reverse = True
elif order == "ascending":
reverse = False
score_map.sort(key=lambda x: x['score'], reverse=reverse)
keys = [x['index'] for x in score_map]
return keys
def get_rank(score):
'''
:param score:
:return:
e.g. input: [0.8, 0.4, 0.6] return [0, 2, 1]
'''
sort = np.argsort(score)
size = score.shape[0]
rank = np.zeros(size)
for i in range(size):
rank[sort[i]] = size - i - 1
return rank
def min_max_norm(array):
array = np.array(array)
_min_, _max_ = np.min(array), np.max(array)
if _min_ == _max_:
raise ValueError("Given a array with same max and min value in normalisation")
norm_array = np.array([(a - _min_) / (_max_ - _min_) for a in array])
return norm_array
def sum_norm(array):
array = np.array(array)
sum = np.sum(array)
norm_array = array / sum
return norm_array
def get_performance(score, y_true):
auc_roc = metrics.roc_auc_score(y_true, score)
precision, recall, _ = metrics.precision_recall_curve(y_true, score)
auc_pr = metrics.auc(recall, precision)
return auc_roc, auc_pr
def ensemble_scores(score1, score2):
'''
:param score1:
:param score2:
:return: ensemble score
    @@ ensemble two score functions
    Instead of a simple average, median or other fixed statistic, we use a
    non-parametric rule to set the trade-off between the two estimated scores
    dynamically: for each object, the score function that ranks it as more
    anomalous (i.e. gives it a relatively higher outlier score) receives the
    larger weight in the combination.
'''
objects_num = len(score1)
[_max, _min] = [np.max(score1), np.min(score1)]
score1 = (score1 - _min) / (_max - _min)
[_max, _min] = [np.max(score2), np.min(score2)]
score2 = (score2 - _min) / (_max - _min)
rank1 = get_rank(score1)
rank2 = get_rank(score2)
alpha_list = (1. / (2 * (objects_num - 1))) * (rank2 - rank1) + 0.5
combine_score = alpha_list * score1 + (1. - alpha_list) * score2
return combine_score
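def _demo_ensemble_scores():
    # Hedged usage sketch: combine two synthetic score vectors; for each object the
    # scorer that ranks it as more anomalous receives the larger weight.
    rng = np.random.RandomState(0)
    score_a, score_b = rng.rand(100), rng.rand(100)
    return ensemble_scores(score_a, score_b)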
def mat2csv(in_path, out_root_path):
from scipy import io
import pandas as pd
data = io.loadmat(in_path)
x = np.array(data['X'])
y = np.array(data['y'], dtype=int)
n_f = x.shape[1]
columns = ["A" + str(i) for i in range(n_f)]
columns.append("class")
matrix = np.concatenate([x, y], axis=1)
df = pd.DataFrame(matrix, columns=columns)
name = in_path.split("/")[-1].split(".")[0]
df.to_csv(out_root_path + name + ".csv", index=False)
return
def get_summary(in_path):
import pandas as pd
name = in_path.split("/")[-1].split(".")[0]
df = pd.read_csv(in_path)
x = df.values[:, :-1]
y = df.values[:, -1]
n_x = x.shape[0]
n_f = x.shape[1]
n_ano = np.sum(y)
print("%s, %d, %d, %d" % (name, n_x, n_f, n_ano))
def mnist_od(org_df, out_root_path, a):
from numpy.random import RandomState
x = org_df.values[:, :-1]
y = org_df.values[:, -1]
n_f = x.shape[1]
if a == 1:
# use one class as normal, and sampling anomalies from other classes, imbalance rate=1%
for i in range(10):
normal_ids = np.where(y == i)[0]
n_normal = len(normal_ids)
n_anomaly = int(n_normal * 0.01)
for j in range(10):
candidate_ids = np.where(y != i)[0]
anomaly_ids = RandomState(None).choice(candidate_ids, n_anomaly, replace=False)
normal_data = x[normal_ids]
anomaly_data = x[anomaly_ids]
n_all = n_normal + n_anomaly
out_y = np.concatenate([np.zeros(n_normal, dtype=int), np.ones(n_anomaly, dtype=int)]).reshape([n_all, 1])
out_x = np.concatenate([normal_data, anomaly_data], axis=0)
print(out_x.shape, out_y.shape)
matrix = np.concatenate([out_x, out_y], axis=1)
columns = ["A" + str(i) for i in range(n_f)]
columns.append("class")
df = pd.DataFrame(matrix, columns=columns)
df.to_csv(out_root_path + "mnist_" + str(i) + "-" + str(j) + ".csv", index=False)
elif a == 2:
# use one class as anomaly (100), and sampling inliers from other classes, imbalance rate=1%
for i in range(10):
for j in range(10):
n_anomaly = 100
anomaly_ids = RandomState(None).choice(np.where(y == i)[0], n_anomaly, replace=False)
n_normal = 50 * n_anomaly
normal_ids = RandomState(None).choice(np.where(y != i)[0], n_normal, replace=False)
normal_data = x[normal_ids]
anomaly_data = x[anomaly_ids]
n_all = n_normal + n_anomaly
out_y = np.concatenate([np.zeros(n_normal, dtype=int), np.ones(n_anomaly, dtype=int)]).reshape([n_all, 1])
out_x = np.concatenate([normal_data, anomaly_data], axis=0)
print(out_x.shape, out_y.shape)
matrix = np.concatenate([out_x, out_y], axis=1)
columns = ["A" + str(i) for i in range(n_f)]
columns.append("class")
                df = pd.DataFrame(matrix, columns=columns)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout,Flatten,Conv2D, MaxPooling2D
train_ds = pd.read_csv("./train.csv")
test_ds = pd.read_csv("./test.csv")
y_train = pd.get_dummies(train_ds['label'])
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/11/2 21:08
Desc: 10jqka (Tonghuashun) - Data Center - Technical Stock Screening
http://data.10jqka.com.cn/rank/cxg/
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from akshare.datasets import get_ths_js
def _get_file_content_ths(file: str = "ths.js") -> str:
"""
    Get the content of a bundled JS file
    :param file: JS file name
    :type file: str
    :return: file content
:rtype: str
"""
setting_file_path = get_ths_js(file)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
def stock_rank_cxg_ths(symbol: str = "创月新高") -> pd.DataFrame:
"""
    10jqka - Data Center - Technical Stock Screening - New Highs
    http://data.10jqka.com.cn/rank/cxg/
    :param symbol: choice of {"创月新高", "半年新高", "一年新高", "历史新高"}
    :type symbol: str
    :return: new-high data
:rtype: pandas.DataFrame
"""
symbol_map = {
"创月新高": "4",
"半年新高": "3",
"一年新高": "2",
"历史新高": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["序号", "股票代码", "股票简称", "涨跌幅", "换手率", "最新价", "前期高点", "前期高点日期"]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].str.strip("%")
big_df["换手率"] = big_df["换手率"].str.strip("%")
big_df["前期高点日期"] = pd.to_datetime(big_df["前期高点日期"]).dt.date
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["前期高点"] = pd.to_numeric(big_df["前期高点"])
return big_df
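def _demo_stock_rank_cxg_ths() -> pd.DataFrame:
    # Hedged usage sketch; needs live network access to data.10jqka.com.cn and the
    # bundled ths.js, so it is illustrative rather than part of the library.
    return stock_rank_cxg_ths(symbol="创月新高")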
def stock_rank_cxd_ths(symbol: str = "创月新低") -> pd.DataFrame:
"""
    10jqka - Data Center - Technical Stock Screening - New Lows
    http://data.10jqka.com.cn/rank/cxd/
    :param symbol: choice of {"创月新低", "半年新低", "一年新低", "历史新低"}
    :type symbol: str
    :return: new-low data
:rtype: pandas.DataFrame
"""
symbol_map = {
"创月新低": "4",
"半年新低": "3",
"一年新低": "2",
"历史新低": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
    big_df = pd.DataFrame()
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
import pandas as pd
import pytest
from categorical_encoder import CategoricalEncoder, RareLabelEncoder
def test_CategoricalEncoder_count():
df = {'category': ['A'] * 10 + ['B'] * 6 + ['C'] * 4,
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0,]}
df = pd.DataFrame(df)
transf_df = {'category': [10,10,10,10,10,10,10,10,10,10,6,6,6,6,6,6,4,4,4,4],
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0]}
transf_df = pd.DataFrame(transf_df)
encoder = CategoricalEncoder(encoding_method='count')
encoder.fit(df, variables = ['category'])
X = encoder.transform(df)
pd.testing.assert_frame_equal(X, transf_df)
assert encoder.variables_ == ['category']
assert encoder.encoder_dict_ == {'category': {'A':10, 'B':6, 'C':4}}
assert encoder.input_shape_ == (20,2)
def test_CategoricalEncoder_frequency():
df = {'category': ['A'] * 10 + ['B'] * 6 + ['C'] * 4,
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0,]}
df = pd.DataFrame(df)
transf_df = {'category': [0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.3,0.3,
0.3,0.3,0.3,0.3,0.2,0.2,0.2,0.2],
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0]}
transf_df = pd.DataFrame(transf_df)
encoder = CategoricalEncoder(encoding_method='frequency')
encoder.fit(df, variables = ['category'])
X = encoder.transform(df)
pd.testing.assert_frame_equal(X, transf_df)
assert encoder.variables_ == ['category']
assert encoder.encoder_dict_ == {'category': {'A':0.5, 'B':0.3, 'C':0.2}}
assert encoder.input_shape_ == (20,2)
def test_CategoricalEncoder_ordinal():
df = {'category': ['A'] * 10 + ['B'] * 6 + ['C'] * 4,
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0,]}
df = pd.DataFrame(df)
transf_df = {'category': [0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,2,2,2,2],
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0]}
transf_df = pd.DataFrame(transf_df)
encoder = CategoricalEncoder(encoding_method='ordinal')
encoder.fit(df['category'].to_frame(), df['target'] )
X = encoder.transform(df['category'].to_frame())
pd.testing.assert_frame_equal(X, transf_df['category'].to_frame())
assert encoder.variables_ == ['category']
assert encoder.encoder_dict_ == {'category': {'A':0, 'B':1, 'C':2}}
assert encoder.input_shape_ == (20,1)
def test_CategoricalEncoder_mean():
df = {'category': ['A'] * 10 + ['B'] * 6 + ['C'] * 4,
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0,]}
df = pd.DataFrame(df)
transf_df = {'category': [0.200000,0.200000,0.200000,0.200000,0.200000,
0.200000,0.200000,0.200000,0.200000,0.200000,
0.333333,0.333333,0.333333,0.333333,0.333333,
0.333333,0.500000,0.500000,0.500000,0.500000],
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0]}
transf_df = pd.DataFrame(transf_df)
encoder = CategoricalEncoder(encoding_method='mean')
encoder.fit(df['category'].to_frame(), df['target'] )
X = encoder.transform(df['category'].to_frame())
pd.testing.assert_frame_equal(X, transf_df['category'].to_frame())
assert encoder.variables_ == ['category']
assert encoder.encoder_dict_ == {'category': {'A':0.20000000000000001,
'B':0.33333333333333331,
'C':0.5}}
assert encoder.input_shape_ == (20,1)
def test_CategoricalEncoder_ratio():
df = {'category': ['A'] * 10 + ['B'] * 6 + ['C'] * 4,
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0,]}
df = pd.DataFrame(df)
transf_df = {'category': [0.25,0.25,0.25,0.25,0.25,
0.25,0.25,0.25,0.25,0.25,
0.50,0.50,0.50,0.50,0.50,
0.50,1.00,1.00,1.00,1.00],
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0]}
transf_df = pd.DataFrame(transf_df)
encoder = CategoricalEncoder(encoding_method='ratio')
encoder.fit(df['category'].to_frame(), df['target'] )
X = encoder.transform(df['category'].to_frame())
pd.testing.assert_frame_equal(X, transf_df['category'].to_frame())
assert encoder.variables_ == ['category']
assert encoder.encoder_dict_ == {'category': {'A':0.25,
'B':0.49999999999999994,
'C':1.0}}
assert encoder.input_shape_ == (20,1)
# when one of the probabilities is zero
df = {'category': ['A'] * 10 + ['B'] * 6 + ['C'] * 4,
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,1,1,]}
df = pd.DataFrame(df)
transf_df = {'category': [0.25,0.25,0.25,0.25,0.25,
0.25,0.25,0.25,0.25,0.25,
0.50,0.50,0.50,0.50,0.50,
0.50,10000.00,10000.00,10000.00,10000.00],
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0]}
transf_df = pd.DataFrame(transf_df)
encoder = CategoricalEncoder(encoding_method='ratio')
encoder.fit(df['category'].to_frame(), df['target'] )
X = encoder.transform(df['category'].to_frame())
pd.testing.assert_frame_equal(X, transf_df['category'].to_frame())
def test_CategoricalEncoder_woe():
df = {'category': ['A'] * 10 + ['B'] * 6 + ['C'] * 4,
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,1,1,0,0, 1,1,0,0,]}
df = pd.DataFrame(df)
transf_df = {'category': [-1.386294,-1.386294,-1.386294,-1.386294,-1.386294,
-1.386294,-1.386294,-1.386294,-1.386294,-1.386294,
0.693147,0.693147,0.693147,0.693147,0.693147,
0.693147,0.000000,0.000000,0.000000,0.000000],
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0]}
transf_df = pd.DataFrame(transf_df)
encoder = CategoricalEncoder(encoding_method='woe')
encoder.fit(df['category'].to_frame(), df['target'] )
X = encoder.transform(df['category'].to_frame())
pd.testing.assert_frame_equal(X, transf_df['category'].to_frame())
assert encoder.variables_ == ['category']
assert encoder.encoder_dict_ == {'category': {'A':-1.3862943611198906,
'B':0.69314718055994518,
'C':0.0}}
assert encoder.input_shape_ == (20,1)
# prob(1)==1
df = {'category': ['A'] * 10 + ['B'] * 6 + ['C'] * 4,
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,1,1,0,0, 1,1,1,1,]}
df = pd.DataFrame(df)
transf_df = {'category': [-1.386294,-1.386294,-1.386294,-1.386294,-1.386294,
-1.386294,-1.386294,-1.386294,-1.386294,-1.386294,
0.693147,0.693147,0.693147,0.693147,0.693147,
0.693147,9.210340,9.210340,9.210340,9.210340],
'target' : [1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0, 1,1,0,0]}
    transf_df = pd.DataFrame(transf_df)
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/12 15:47
Desc: East Money - Shanghai/Shenzhen Boards - Concept Boards
http://quote.eastmoney.com/center/boardlist.html#concept_board
"""
import requests
import pandas as pd
def stock_board_concept_name_em() -> pd.DataFrame:
"""
    East Money - Shanghai/Shenzhen Boards - Concept Boards - Names
    http://quote.eastmoney.com/center/boardlist.html#concept_board
    :return: concept board names
:rtype: pandas.DataFrame
"""
url = "http://79.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:90 t:3 f:!50",
"fields": "f2,f3,f4,f8,f12,f14,f15,f16,f17,f18,f20,f21,f24,f25,f22,f33,f11,f62,f128,f124,f107,f104,f105,f136",
"_": "1626075887768",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"排名",
"最新价",
"涨跌幅",
"涨跌额",
"换手率",
"_",
"板块代码",
"板块名称",
"_",
"_",
"_",
"_",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"上涨家数",
"下跌家数",
"_",
"_",
"领涨股票",
"_",
"_",
"领涨股票-涨跌幅",
]
temp_df = temp_df[
[
"排名",
"板块名称",
"板块代码",
"最新价",
"涨跌额",
"涨跌幅",
"总市值",
"换手率",
"上涨家数",
"下跌家数",
"领涨股票",
"领涨股票-涨跌幅",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["上涨家数"] = pd.to_numeric(temp_df["上涨家数"])
temp_df["下跌家数"] = pd.to_numeric(temp_df["下跌家数"])
temp_df["领涨股票-涨跌幅"] = pd.to_numeric(temp_df["领涨股票-涨跌幅"])
return temp_df
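def _demo_stock_board_concept_name_em() -> pd.DataFrame:
    # Hedged usage sketch; needs live network access to push2.eastmoney.com,
    # so it is illustrative rather than part of the library.
    return stock_board_concept_name_em()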
def stock_board_concept_hist_em(symbol: str = "数字货币", adjust: str = "") -> pd.DataFrame:
"""
    East Money - Shanghai/Shenzhen Boards - Concept Boards - Historical Quotes
    http://q.10jqka.com.cn/gn/detail/code/301558/
    :param symbol: board name
    :type symbol: str
    :param adjust: choice of {'': no adjustment, "qfq": forward-adjusted, "hfq": backward-adjusted}
    :type adjust: str
    :return: historical quotes
:rtype: pandas.DataFrame
"""
stock_board_concept_em_map = stock_board_concept_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
adjust_map = {"": "0", "qfq": "1", "hfq": "2"}
url = "http://91.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"90.{stock_board_code}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": "101",
"fqt": adjust_map[adjust],
"beg": "0",
"end": "20500101",
"smplmt": "10000",
"lmt": "1000000",
"_": "1626079488673",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]["klines"]])
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df = temp_df[
[
"日期",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
temp_df["开盘"] = pd.t | o_numeric(temp_df["开盘"]) | pandas.to_numeric |
import os
import pickle
import sys
from pathlib import Path
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from Bio import pairwise2
from scipy import interp
from scipy.stats import linregress
from sklearn.metrics import roc_curve, auc, precision_recall_curve
import thoipapy
import thoipapy.validation.bocurve
from thoipapy.utils import make_sure_path_exists
def collect_indiv_validation_data(s, df_set, logging, namedict, predictors, THOIPA_predictor_name, subsets):
"""
Parameters
----------
s
df_set
logging
namedict
predictors
THOIPA_predictor_name
Returns
-------
"""
logging.info("start collect_indiv_validation_data THOIPA_PREDDIMER_TMDOCK")
ROC_AUC_df = pd.DataFrame()
PR_AUC_df = pd.DataFrame()
mean_o_minus_r_by_sample_df = pd.DataFrame()
AUBOC_from_complete_data_ser = pd.Series()
AUC_AUBOC_name_list = []
linechar_name_list = []
AUBOC_list = []
df_o_minus_r_mean_df = pd.DataFrame()
roc_auc_mean_list = []
roc_auc_std_list = []
# indiv_validation_dir: Path = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation"
indiv_validation_data_xlsx = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/indiv_validation_data.xlsx"
thoipapy.utils.make_sure_path_exists(indiv_validation_data_xlsx, isfile=True)
# if not os.path.isdir(os.path.dirname(BOAUC10_barchart_pdf)):
# os.makedirs(os.path.dirname(BOAUC10_barchart_pdf))
for predictor in predictors:
BO_data_df = pd.DataFrame()
xv_dict = {}
ROC_AUC_dict = {}
PR_AUC_dict = {}
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
auc_pkl = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/roc_auc/{predictor}/ROC_AUC_data.pkl"
BO_curve_data_csv = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/BO_Curve_data.csv"
bocurve_data_xlsx = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/bocurve_data.xlsx"
BO_linechart_png = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/BO_linechart.png"
BO_barchart_png = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/AUBOC_barchart.png"
df_o_minus_r_mean_csv = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/df_o_minus_r_mean.csv"
thoipapy.utils.make_sure_path_exists(auc_pkl, isfile=True)
thoipapy.utils.make_sure_path_exists(BO_curve_data_csv, isfile=True)
for i in df_set.index:
sys.stdout.write(".")
sys.stdout.flush()
acc = df_set.loc[i, "acc"]
database = df_set.loc[i, "database"]
acc_db = acc + "-" + database
merged_data_csv_path: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/predictions/merged/{database}.{acc}.merged.csv"
merged_data_df = pd.read_csv(merged_data_csv_path, engine="python")
# invert some predictors so that a high number always indicates a predicted interface residue
merged_data_df["LIPS_L*E"] = -1 * merged_data_df["LIPS_L*E"]
merged_data_df["PREDDIMER"] = -1 * merged_data_df["PREDDIMER"]
merged_data_df["TMDOCK"] = -1 * merged_data_df["TMDOCK"]
if database == "crystal" or database == "NMR":
# invert the interface score of structural data so that a high number indicates an interface residue
merged_data_df["interface_score"] = -1 * merged_data_df["interface_score"]
# toggle whether to use boolean (interface) or continuous data (interface_score). Here we want continuous data
experiment_col = "interface_score"
BO_single_prot_df = thoipapy.validation.bocurve.calc_best_overlap_from_selected_column_in_df(acc_db, merged_data_df, experiment_col, predictor)
if BO_data_df.empty:
BO_data_df = BO_single_prot_df
else:
BO_data_df = pd.concat([BO_data_df, BO_single_prot_df], axis=1, join="outer")
df_for_roc = merged_data_df.dropna(subset=[experiment_col, predictor])
fpr, tpr, thresholds = roc_curve(df_for_roc.interface, df_for_roc[predictor], drop_intermediate=False)
precision, recall, thresholds_PRC = precision_recall_curve(df_for_roc.interface, df_for_roc[predictor])
pr_auc = auc(recall, precision)
PR_AUC_dict[acc_db] = pr_auc
roc_auc = auc(fpr, tpr)
ROC_AUC_dict[acc_db] = roc_auc
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
xv_dict[acc_db] = {"fpr": fpr, "tpr": tpr, "roc_auc": roc_auc, "precision": precision, "recall": recall, "pr_auc": pr_auc}
# save dict as pickle
with open(auc_pkl, "wb") as f:
pickle.dump(xv_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
BO_data_df.to_csv(BO_curve_data_csv)
# parse BO data csv
# print out mean values
thoipapy.validation.bocurve.parse_BO_data_csv_to_excel(BO_curve_data_csv, bocurve_data_xlsx, s["n_residues_AUBOC_validation"], logging, predictor)
# ROC AUC validation
ROC_AUC_ser = pd.Series(ROC_AUC_dict)
ROC_AUC_ser.sort_values(inplace=True, ascending=False)
roc_auc_mean_list.append(ROC_AUC_ser.mean())
roc_auc_std_list.append(ROC_AUC_ser.std())
# precision-recall AUC validation
        PR_AUC_ser = pd.Series(PR_AUC_dict)
import pandas as pd
from glob import glob
from collections import defaultdict
import csv
import time
import subprocess
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('start_well_index', help='key to identify which wells to do')
args = parser.parse_args()
start_well_index = int(args.start_well_index)
sam_cols = ['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL'] + ['EXTRA'+str(i) for i in range(7)]
# this is lenient, not all of these reads will actually be used, but that's ok, just showing everything close is fine
def is_evidence(row, cmp):
left = int(row['POS'])
right = left+150
mut_matches = []
for pos in cmp[row['RNAME']]:
if pos > left and pos < right:
mut_matches.append(row['RNAME']+str(pos))
return ';'.join(mut_matches)
def alignment(r):
# uses the cigar to format the sequence to show in an alignment
# adds - for deletions, puts insertions in parentheses (will be shown only by a red line that shows the insertion on hover)
seq, cigar = r['SEQ'], r['CIGAR']
new_seq = ''
current_nums = ''
current_spot = 0
for character in cigar:
if character in ['M', 'D', 'I', 'S', 'H', '=', 'X']:
if character in ['H', '=', 'X']:
return new_seq
elif character == 'M':
new_seq += seq[current_spot:current_spot+int(current_nums)]
current_spot += int(current_nums)
elif character == 'D':
new_seq += '-'*int(current_nums)
else:
new_seq += '(' + seq[current_spot:current_spot+int(current_nums)] + ')'
current_spot += int(current_nums)
current_nums = ''
else:
current_nums += character
return new_seq
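# Worked example (hand-checked against the code above): for SEQ='ACGTACG' and
# CIGAR='3M2D4M', alignment() emits 'ACG', then '--' for the two deleted bases,
# then 'TACG', giving 'ACG--TACG'; an insertion ('I') would instead appear
# wrapped in parentheses.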
wells = ['P1B02', 'P1B03', 'P1B04', 'P1B07', 'P1B11', 'P1C02', 'P1C04', 'P1C05', 'P1C06', 'P1C07', 'P1C08', 'P1C09', 'P1C11', 'P1D03', 'P1D09', 'P1E04', 'P1E09',
'P1E11', 'P1F05', 'P1F07', 'P1F08', 'P1F10', 'P1F11', 'P1G04', 'P1G05', 'P1G08', 'P1G09', 'P1G10', 'P1G11', 'P1H11', 'P2B04', 'P2B05', 'P2B07', 'P2B08',
'P2B09', 'P2B10', 'P2B11', 'P2C02', 'P2C04', 'P2C05', 'P2C06', 'P2C10', 'P2C11', 'P2D03', 'P2D06', 'P2D08', 'P2D11', 'P2E06', 'P2E08', 'P2E11', 'P2F02',
'P2F07', 'P2F09', 'P2F11', 'P2G04', 'P2G05', 'P2G08', 'P2G09', 'P2G10', 'P2G11', 'P3B07', 'P3B08', 'P3B10', 'P3B11', 'P3C03', 'P3C04', 'P3C05', 'P3C07',
'P3C10', 'P3C11', 'P3D02', 'P3D03', 'P3D05', 'P3D09', 'P3D10', 'P3D11', 'P3E02', 'P3E08', 'P3E11', 'P3F03', 'P3F05', 'P3F07', 'P3F09', 'P3F11', 'P3G02',
'P3G05', 'P3G06', 'P3G09', 'P3G10', 'P3G11']
gens = [70, 1410, 2640, 5150, 7530, 10150]
chromo_mut_positions = defaultdict(list)
chromos = ['chrI', 'chrII', 'chrIII', 'chrIV', 'chrIX', 'chrMito', 'chrV', 'chrVI', 'chrVII',
'chrVIII', 'chrX', 'chrXI', 'chrXII', 'chrXIII', 'chrXIV', 'chrXV', 'chrXVI', '2-micron']
#done_wells = [i.split('/')[-1].split('.')[0] for i in glob('../../Output/Browser/evidence_sams/*.tsv')]
for well in wells[start_well_index*10:start_well_index*10+10]:
f = '../../Output/WGS/combined_option/well_output/' + well + '_filtered.tsv'
otime = time.time()
#if well not in done_wells:
print(well)
td = pd.read_csv(f, delimiter='\t')
td['pos2'] = td['POS']
bedfile = '../../Output/Browser/evidence_sams/tmp'+well+'_mut_regions.bed'
td[['CHROM', 'POS', 'pos2']].to_csv(bedfile, sep='\t', index=False, header=False)
for entry in td.as_matrix(['CHROM', 'POS']):
chromo_mut_positions[entry[0]].append(entry[1])
dats = []
for gen in gens:
try:
outsam = '../../Output/Browser/evidence_sams/tmp/G'+str(gen)+'_'+well+'.tsv'
subprocess.call(['samtools view ../../Output/WGS/work/G' + str(gen) + '_' + well + '.sam' + ' -L ' + bedfile + ' -o ' + outsam], shell=True)
td = pd.read_csv(outsam, delimiter='\t', header=None, names=sam_cols, index_col=False)
if gen in [70, 2640, 7530]:
outsam2 = '../../Output/Browser/evidence_sams/tmp/G'+str(gen)+'_'+well+'_2.tsv'
subprocess.call(['samtools view ../../Output/WGS/lane_w_R2_issues_work/G' + str(gen) + '_' + well + '.sam' + ' -L ' + bedfile + ' -o ' + outsam2], shell=True)
td = pd.concat([td, pd.read_csv(outsam2, delimiter='\t', header=None, names=sam_cols, index_col=False)])
td['Gen'] = [gen]*len(td)
td['Evidence_for'] = td.apply(lambda r: is_evidence(r, chromo_mut_positions), axis=1)
td['Aligned'] = td.apply(lambda r: alignment(r), axis=1)
dats.append(td[['RNAME', 'POS', 'Gen', 'Aligned']])
except FileNotFoundError:
print('File missing for', str(gen), well)
full_dat = | pd.concat(dats) | pandas.concat |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(mssql_url: str) -> None:
query = (
"SELECT dbo.increment(test_int) AS test_int FROM test_table ORDER BY test_int"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(mssql_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(mssql_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_without_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_without_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 0], dtype="int64"),
"test_nullint": pd.Series([3, None, 5], dtype="Int64"),
"test_str": pd.Series(["str1", "str2", "a"], dtype="object"),
"test_float": pd.Series([None, 2.2, 3.1], dtype="float64"),
"test_bool": pd.Series([True, False, None], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_without_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_with_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([0, 1, 2], dtype="int64"),
"test_nullint": pd.Series([5, 3, None], dtype="Int64"),
"test_str": pd.Series(["a", "str1", "str2"], dtype="object"),
"test_float": pd.Series([3.1, None, 2.20], dtype="float64"),
"test_bool": pd.Series([None, True, False], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_with_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_without_partition_range(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": pd.Series([0, 4], dtype="int64"),
"test_nullint": pd.Series([5, 9], dtype="Int64"),
"test_str": pd.Series(["a", "c"], dtype="object"),
"test_float": pd.Series([3.1, 7.8], dtype="float64"),
"test_bool": pd.Series([None, None], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_selection(mssql_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_projection(mssql_url: str) -> None:
query = "SELECT test_int, test_float, test_str FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_spja(mssql_url: str) -> None:
query = """
SELECT test_bool, AVG(test_float) AS avg, SUM(test_int) AS sum
FROM test_table AS a, test_str AS b
WHERE a.test_int = b.id AND test_nullint IS NOT NULL
GROUP BY test_bool
ORDER BY sum
"""
df = read_sql(mssql_url, query, partition_on="sum", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([True, False, None], dtype="boolean"),
"avg": pd.Series([None, 3, 5.45], dtype="float64"),
"sum": pd.Series([1, 3, 4], dtype="Int64"),
},
)
df = df.sort_values("sum").reset_index(drop=True)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
data={
"test_int": pd.Series([], dtype="int64"),
"test_nullint": pd.Series([], dtype="Int64"),
"test_str": pd.Series([], dtype="object"),
"test_float": pd.Series([], dtype="float64"),
"test_bool": pd.Series([], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result_on_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=3)
expected = pd.DataFrame(
data={
"test_int": pd.Series([], dtype="int64"),
"test_nullint": pd.Series([], dtype="Int64"),
"test_str": pd.Series([], dtype="object"),
"test_float": pd.Series([], dtype="float64"),
"test_bool": pd.Series([], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result_on_some_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < 1"
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=3)
expected = pd.DataFrame(
data={
"test_int": pd.Series([0], dtype="int64"),
"test_nullint": pd.Series([5], dtype="Int64"),
"test_str": pd.Series(["a"], dtype="object"),
"test_float": pd.Series([3.1], dtype="float"),
"test_bool": pd.Series([None], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_types(mssql_url: str) -> None:
query = "SELECT * FROM test_types"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int1": pd.Series([0, 255, None], dtype="Int64"),
"test_int2": pd.Series([-32768, 32767, None], dtype="Int64"),
"test_int4": pd.Series([-2147483648, 2147483647, None], dtype="Int64"),
"test_int8": pd.Series(
[-9223372036854775808, 9223372036854775807, None], dtype="Int64"
),
"test_float24": pd.Series([None, 1.18e-38, 3.40e38], dtype="float"),
"test_float53": | pd.Series([None, -2.23e-308, 1.79e308], dtype="float") | pandas.Series |
import pandas as pd
class BarBase(object):
pass
class Current_bar(BarBase):
def __init__(self):
self._cur_bar_list = []
def add_new_bar(self, new_bar):
"添加新行情,会缓存第n条当前行情,和第n+1条行情,共两条"
self._cur_bar_list.pop(0) if len(self._cur_bar_list) == 2 else None
self._cur_bar_list.append(new_bar)
@property
def cur_data(self):
return self._cur_bar_list[0]
@property
def next_data(self):
return self._cur_bar_list[1]
@property
def cur_date(self):
return self._cur_bar_list[0]["date"]
@property
def cur_open(self):
return self._cur_bar_list[0]["open"]
@property
def cur_high(self):
return self._cur_bar_list[0]["high"]
@property
def cur_low(self):
return self._cur_bar_list[0]["low"]
@property
def cur_close(self):
return self._cur_bar_list[0]["close"]
@property
def next_date(self):
return self._cur_bar_list[1]["date"]
@property
def next_open(self):
return self._cur_bar_list[1]["open"]
@property
def next_high(self):
return self._cur_bar_list[1]["high"]
@property
def next_low(self):
return self._cur_bar_list[1]["low"]
@property
def next_close(self):
return self._cur_bar_list[1]["close"]
class Bar(BarBase):
"""
    Bar stores the OHLC data of every feed and is consolidated in onepy.py.
    Inside a feed, call set_instrument before each add_new_bar so that the feed
    the new bar belongs to can be identified.
"""
def __init__(self, instrument):
self._bar_dict = {instrument: []}
self._instrument = instrument
        self._data_name = None  # used to decide what __getitem__ returns
def __getitem__(self, item):
return self._bar_dict
def _initialize(self):
"""将数据清空"""
self._bar_dict = {}
def _combine_all_feed(self, new_bar_dict):
"""只运行一次,将所有feed整合到一起"""
self._bar_dict.update(new_bar_dict)
def __getitem_func(self, given):
if isinstance(given, slice):
# do your handling for a slice object:
start = given.start if given.start is not None else 0
stop = given.stop if given.stop is not None else len(self.data)
            # handle negative slice bounds
length = len(self.data)
start = length + start if start < 0 else start
stop = length + stop if stop < 0 else stop
original_data = self.data[start:stop] # 格式为[{},{},{}...]
data = [i[self._data_name] for i in original_data]
return data
else:
# Do your handling for a plain index
return self._bar_dict[self.instrument][given]["close"]
def __create_data_cls(self):
cls = type("OHLC", (), {})
cls.data = self._bar_dict[self.instrument]
cls.__getitem__ = self.__getitem_func
return cls
def set_instrument(self, instrument):
self._instrument = instrument
def add_new_bar(self, new_bar):
"添加新行情"
self._bar_dict[self.instrument].append(new_bar)
@property
def instrument(self):
return self._instrument
@property
def data(self):
return self._bar_dict[self.instrument]
@property
def total_dict(self):
return self._bar_dict
@property
def df(self):
return | pd.DataFrame(self._bar_dict[self.instrument]) | pandas.DataFrame |
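# --- Illustrative aside ---
# A usage sketch for the Current_bar class defined above (the bar values are made up):
# it keeps a two-item window, so the cur_* properties read the n-th bar and the next_*
# properties read the (n+1)-th bar.
cur = Current_bar()
cur.add_new_bar({"date": "2021-01-04", "open": 10.0, "high": 10.5, "low": 9.8, "close": 10.2})
cur.add_new_bar({"date": "2021-01-05", "open": 10.2, "high": 10.8, "low": 10.1, "close": 10.6})
print(cur.cur_close, cur.next_open)  # -> 10.2 10.2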
import pandas as pd
import numpy as np
from flask_socketio import SocketIO, emit
import time
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import ast
from sklearn.metrics import mean_absolute_error,mean_squared_error
from statsmodels.tsa import arima_model
from statsmodels.tsa.statespace.sarimax import SARIMAX
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.stats.outliers_influence import variance_inflation_factor
from copy import deepcopy
import joblib
from sklearn.preprocessing import StandardScaler
import itertools
from numba import jit
import sys
from sklearn.externals import joblib
import pandas as pd
from concurrent.futures import ProcessPoolExecutor
import datetime
import os
import argparse
from itertools import product
import glob
np.random.seed(0)
import logging
logging.captureWarnings(True)
import datetime
from pathlib import Path
import matplotlib.pyplot as plt
def forecastr(data,forecast_settings,column_headers,freq_val,build_settings):
"""
Background: This function will take the data from the csv and forecast out x number of days.
Input:
data: This is a pandas dataframe containing time series data, datetime first column
forecast_settings: This is a list containing values for model type, forecast period length,test_period and seasonality parameters
column_headers: List containing the name of the date and metric
freq_val: String containing "D","M","Y"
build_settings: String determining whether this is an initial or updated forecast.
Output:
[y_hat,dates,m,csv_ready_for_export]: A list containing forecasted data, dimension, model and data for the csv export
"""
##### Variables, Model Settings & Facebook Prophet Hyper Parameters #####
# Initial Variables
build = build_settings # Determine the build_setting - either initial or update forecast settings.
dimension = column_headers[0] # date
metric = column_headers[1] # metric name
# Rename the columns so we can use FB Prophet
data.rename(columns={dimension: "ds", metric: "y"}, inplace=True)
# Hyper-parameters
fs_model_type = forecast_settings[0] # linear or logistic
fs_forecast_period = int(forecast_settings[1]) # forecast period
fs_test_period=int(forecast_settings[2])# test period
if fs_model_type=="Moving_Average":
my_type="ma"
elif fs_model_type=="SARIMA":
my_type="sarima"
d = range(0,2)
p = q = range(0, 3)
pdq = list(itertools.product(p, d, q))
m_1= range(0,13)
seasonal_pdq = [(x[0], x[1], x[2], x[3]) for x in list(itertools.product(p, d, q,m_1))]
pdq = pdq[1:]
# Instantiate with prophet_arg_vals that are not auto, 0 or False.
model=prediction_func(data,pdq=pdq,seasonal_pdq=seasonal_pdq,test_day=fs_test_period,model_type=my_type)
# Status update
emit('processing', {'data': 'model has been fit'})
# Let's create a new data frame for the forecast which includes how long the user requested to forecast out in time units and by time unit type (eg. "D", "M","Y")
#future = m.make_future_dataframe(periods=fs_period, freq=freq_val)
# If fs_model_type = 'logistic', create a column in future for carrying_capacity and saturated_minimum
'''
if fs_model_type == 'logistic':
future['cap'] = fs_carrying_capacity
future['floor'] = fs_saturated_minimum
else:
print('no cap or floor needed as it is a linear model.')
'''
# Let's predict the future :)
y_forecast=model.forecast(fs_forecast_period+2).tolist()
y_hat=model.predict().tolist()
y_hat=y_hat[1:]
preds=y_hat+y_forecast
print("forecast length",len(y_forecast))
print("actual length",len(y_hat))
print("total pred length",len(preds))
##### Send y_hat and dates to a list, so that they can be graphed easily when set in ChartJS
data_new=data.append(pd.DataFrame({"ds": [str(a).split(" ")[0] for a in pd.date_range(start=pd.to_datetime(data.ds.iloc[-1]),periods=fs_forecast_period,freq="MS")] }))
print("data new shape: ",data_new.shape)
data_new=data_new.reset_index(drop=True)
data_new["yhat"]=preds
data_new["yhat_upper"]=preds
data_new["yhat_lower"]=preds
#y_hat = data_new['preds'].tolist()
dates = data_new['ds'].apply(lambda x: str(x).split(' ')[0]).tolist()
##### Lets see how the forecast compares to historical performance #####
# First, lets sum up the forecasted metric
forecast_sum = sum(y_hat)
forecast_mean = np.mean(y_hat)
# Now lets sum up the actuals for the same time interval as we predicted
actual_sum = data_new["y"].sum()
actual_mean = data_new["y"].mean()
difference = '{0:.1%}'.format(((forecast_sum - actual_sum) / forecast_sum))
difference_mean = '{0:.1%}'.format(((forecast_mean - actual_mean) / forecast_mean))
forecasted_vals = ['{0:.1f}'.format(forecast_sum),'{0:.1f}'.format(actual_sum),difference]
forecasted_vals_mean = ['{0:.1f}'.format(forecast_mean),'{0:.1f}'.format(actual_mean),difference_mean]
####### Formatting data for CSV Export Functionality ##########
# First, let's merge the original and forecast dataframes
#data_for_csv_export = pd.merge(forecast,data,on='date',how='left')
# Select the columns we want to include in the export
data_new = data_new[['ds','y','yhat','yhat_upper','yhat_lower']]
# Rename y and yhat to the actual metric names
data_new.rename(index=str, columns={'ds': 'date', 'y': metric, 'yhat': metric + '_forecast','yhat_upper':metric + '_upper_forecast','yhat_lower':metric + '_lower_forecast'}, inplace=True)
# replace NaN with an empty val
data_new = data_new.replace(np.nan, '', regex=True)
# Format timestamp
data_new['date'] = data_new['date'].apply(lambda x: str(x).split(' ')[0])
# Create dictionary format for sending to csv
#csv_ready_for_export = export_formatted.to_dict('records')
csv_ready_for_export = data_new.to_dict('records')
print(data_new.tail())
# print(y_hat)
# print(csv_ready_for_export)
return [preds,dates,model,csv_ready_for_export,forecasted_vals, forecasted_vals_mean,data_new]
def validate_model(model,dates):
"""
Background:
This model validation function is still under construction and will be updated during a future release.
"""
count_of_time_units = len(dates)
#print(count_of_time_units)
initial_size = str(int(count_of_time_units * 0.20)) + " days"
horizon_size = str(int(count_of_time_units * 0.10)) + " days"
period_size = str(int(count_of_time_units * 0.05)) + " days"
df_cv = cross_validation(model, initial=initial_size, horizon=horizon_size, period=period_size)
#df_cv = cross_validation(model,initial='730 days', period='180 days', horizon = '365 days')
df_p = performance_metrics(df_cv)
#print(df_cv.head(100))
#print(df_p.head(100))
mape_score_avg = str(round(df_p['mape'].mean()*100,2)) + "%"
return mape_score_avg
def check_val_of_forecast_settings(param):
"""
Background:
        This function checks whether a value was submitted from the UI for a given Prophet hyperparameter. If the value is blank, False or 'auto', it is returned as-is; otherwise it is cast to a float, since the submitted value may arrive as a string.
        Blank, False or 'auto' values are eventually excluded from the dictionary passed in when instantiating Prophet.
"""
# Check hyper parameter value and return appropriate value.
if (param == "") or (param == False) or (param == 'auto'):
new_arg = param
return new_arg
else:
new_arg = float(param)
return new_arg
def get_summary_stats(data,column_headers):
"""
Background:
This function will get some summary statistics about the original dataset being uploaded.
Input:
data: a dataframe with the data from the uploaded csv containing a dimension and metric
column_headers: string of column names for the dimension and metric
Output:
sum_stats: a list containing the count of time units, the mean, std, min and max values of the metric. This data is rendered on step 2 of the UI.
"""
# Set the dimension and metrics
dimension = column_headers[0]
metric = column_headers[1]
time_unit_count = str(data[dimension].count())
print(data[metric].mean())
mean = str(round(data[metric].mean(),2))
print('string of the mean is ' + mean)
std = str(round(data[metric].std(),2))
minimum = str(round(data[metric].min(),2))
maximum = str(round(data[metric].max(),2))
sum_stats = [time_unit_count,mean,std,minimum,maximum]
print(sum_stats)
return sum_stats
def preprocessing(data):
"""
Background: This function will determine which columns are dimensions (time_unit) vs metrics, in addition to reviewing the metric data to see if there are any objects in that column.
Input:
data (df): A dataframe of the parsed data that was uploaded.
Output:
[time_unit,metric_unit]: the appropriate column header names for the dataset.
"""
# Get list of column headers
column_headers = list(data)
# Let's determine the column with a date
col1 = column_headers[0]
col2 = column_headers[-1] #last column
print('the first column is ' + col1)
print("target column is" +col2)
# Get the first value in column 1, which is what is going to be checked.
col1_val = data[col1][0]
print(type(col1_val))
print(data.shape)
# Check to see if the data has any null values
#print('Is there any null values in this data? ' + str(data.isnull().values.any()))
# If there is a null value in the dataset, locate it and emit the location of the null value back to the client, else continue:
#print(data.tail())
print('Is there any null values in this data? ' + str(data.isnull().values.any()))
do_nulls_exist = data.isnull().values.any()
if do_nulls_exist == True:
print('found a null value')
null_rows = | pd.isnull(data) | pandas.isnull |
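# --- Illustrative aside ---
# forecastr() above extends the history with future month-start dates via
# pd.date_range(..., freq="MS") before attaching predictions. A stripped-down sketch of
# that date-extension step on toy data (pd.concat replaces the deprecated DataFrame.append
# used in the snippet; note the last historical month appears twice, mirroring the
# original pattern):
import pandas as pd
history = pd.DataFrame({"ds": ["2021-10-01", "2021-11-01", "2021-12-01"], "y": [10, 12, 11]})
future_dates = [str(d).split(" ")[0]
                for d in pd.date_range(start=pd.to_datetime(history.ds.iloc[-1]), periods=3, freq="MS")]
extended = pd.concat([history, pd.DataFrame({"ds": future_dates})]).reset_index(drop=True)
print(extended["ds"].tolist())
# ['2021-10-01', '2021-11-01', '2021-12-01', '2021-12-01', '2022-01-01', '2022-02-01']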
import streamlit as st
import pandas as pd
import requests
import numpy as np
import wordcloud
import itertools
# Try this?
# https://towardsdatascience.com/topic-modelling-in-python-with-nltk-and-gensim-4ef03213cd21 ... pyLDAvis
num_words = 150
document_limit = 5000
st_time_to_live = 4 * 3600
# Display options
st.set_page_config(
layout="wide",
initial_sidebar_state="expanded",
page_title=("Topic-Model Explorer"),
)
st.title("Topic model explorer")
st.sidebar.title("Options")
n_topics = st.sidebar.slider("Number of Topics", 5, 40, value=30)
n_sort_topic = st.sidebar.slider("Topic sort order by", 0, n_topics - 1)
# Custom fileupload
st.sidebar.markdown("## Custom dataset")
f_upload = st.sidebar.file_uploader('Upload a CSV, with the target column named "text"')
if f_upload is None:
f_dataset = "example_data/reddit_suicide_data.csv"
else:
f_dataset = f_upload
limit_documents = st.sidebar.checkbox(f"Limit to {document_limit}", value=True)
if not limit_documents:
document_limit = None
df = pd.read_csv(f_dataset, nrows=document_limit, error_bad_lines=False)
n_documents = len(df)
if f_upload:
st.write(f"Loaded {len(df):,} documents from `{f_dataset.name}`")
else:
st.write(f"Loaded {len(df):,} documents from `{f_dataset}`")
@st.cache(ttl=st_time_to_live)
def preprocess_input(f_dataset):
with st.spinner("*Preprocessing text with spaCy*"):
url = "http://127.0.0.1:8000/LDA/preprocess"
params = {"text_input": df["text"].values.tolist()}
r = requests.get(url, json=params)
js = r.json()
return js
@st.cache(ttl=st_time_to_live)
def train_tokenized(tokenized, n_topics):
data_input = {
"text_tokenized": tokenized["text_tokenized"],
"n_topics": n_topics,
}
with st.spinner("*Running MALLET*"):
url = "http://127.0.0.1:8000/LDA/train"
r = requests.get(url, json=data_input)
js = r.json()
words = pd.read_json(js["words"], orient="split")
topics = | pd.read_json(js["topics"], orient="split") | pandas.read_json |
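# --- Illustrative aside ---
# The topic-model API above returns DataFrames serialized as JSON with orient="split"
# (separate "columns", "index" and "data" keys). A tiny round-trip sketch of that format:
import pandas as pd
df_orig = pd.DataFrame({"topic": [0, 1], "weight": [0.7, 0.3]})
payload = df_orig.to_json(orient="split")
df_back = pd.read_json(payload, orient="split")
print(df_back.equals(df_orig))  # True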
import pandas as pd
import numpy as np
DATA_PATH = 'rawdata/' #where the raw data files are
ALT_OUTPUT_PATH = 'alt_output/' #there will be many files produced -- the files that are not "the main ones"
# are placed in this directory
feasDF = pd.read_csv(DATA_PATH+"mipdev_feasibility.csv") #read the feasibilty file into a data frame
feasible_DF = feasDF[feasDF['Code'] != 'inf'].groupby('Problem Name').count()
infeasible_DF = feasDF[feasDF['Code'] == 'inf'].groupby('Problem Name').count()
feasible_instances = set(feasible_DF.index)
infeasible_instances = set(infeasible_DF.index)
#some of the instances do not have a complete set of labels
# (all are missing data from either emphasis_optimality or seperate_aggressive or both)
# and when a seed is missing any time data at all, the PDInt is also missing
instances_with_incomplete_data = set([ 'buildingenergy',
'neos-565672',
'neos-848589',
'neos-872648',
'neos-873061',
'netdiversion',
'npmv07',
'ns1758913',
'ofi',
'opm2-z11-s8',
'sing245',
'wnq-n100-mw99-14'])
feasible_instances = feasible_instances.difference(instances_with_incomplete_data)
infeasible_instances = infeasible_instances.difference(instances_with_incomplete_data)
STATUS_COLS = range(1,15) #which columns contain the completion status of an algorithm
DATA_COLS = range(15,29) #which columns contain the PD Ints
COL_OFFSET = 14 #the number of columns offsetting the completion status from PD Ints
PD = pd.read_csv(DATA_PATH+"mipdev_integral_data.csv") #read the PD Int file into a data frame
PD = PD.drop([0,1]) #remove the extraneous header rows
PD = PD.drop(PD.columns[1], axis=1) #drop seed #
Times = pd.read_csv(DATA_PATH+"mipdev_time_data.csv") #read the time data file into a data frame
Times = Times.drop([0,1]) #remove the extraneous header rows
Times = Times.drop(Times.columns[1], axis=1) #drop seed #
#in the time data files, the status and data columns are opposite that of the PDInt file for some reason
# the next line switches them back so that the status cols are in front of the data cols
Times = Times[[Times.columns[0]]+Times.columns[DATA_COLS].tolist()+Times.columns[STATUS_COLS].tolist()]
Regions = | pd.read_csv(DATA_PATH+"regions_time_data.csv") | pandas.read_csv |
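# --- Illustrative aside ---
# The feasibility split above groups the label file by instance name and keeps the group
# index as a set. A compact sketch of that pattern on a made-up frame (note an instance
# can land in both sets if it has mixed codes):
import pandas as pd
labels = pd.DataFrame({"Problem Name": ["a", "a", "b", "c"],
                       "Code": ["ok", "inf", "inf", "ok"]})
feasible = set(labels[labels["Code"] != "inf"].groupby("Problem Name").count().index)
infeasible = set(labels[labels["Code"] == "inf"].groupby("Problem Name").count().index)
print(sorted(feasible), sorted(infeasible))  # ['a', 'c'] ['a', 'b']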
import numpy as np
import pandas as pd
import remixt.bamreader
import os
empty_data = {
'fragments': remixt.bamreader.create_fragment_table(0),
'alleles': remixt.bamreader.create_allele_table(0),
}
def _get_key(record_type, chromosome):
return '/{}/chromosome_{}'.format(record_type, chromosome)
def _unique_index_append(store, key, data):
try:
nrows = store.get_storer(key).nrows
except (AttributeError, KeyError):
nrows = 0
data.index = pd.Series(data.index) + nrows
if nrows == 0:
store.put(key, data, format='table')
else:
store.append(key, data)
def merge_overlapping_seqdata(outfile, infiles, chromosomes):
out_store = pd.HDFStore(outfile, 'w', complevel=9, complib='blosc')
index_offsets = pd.Series(0, index=chromosomes, dtype=np.int64)
for _id, infile in infiles.items():
store = pd.HDFStore(infile)
tables = store.keys()
for chromosome in chromosomes:
allele_table = '/alleles/chromosome_{}'.format(chromosome)
fragment_table = '/fragments/chromosome_{}'.format(chromosome)
if allele_table not in tables:
print("missing table {}".format(allele_table))
continue
if fragment_table not in tables:
print("missing table {}".format(fragment_table))
continue
alleles = store[allele_table]
fragments = store[fragment_table]
alleles['fragment_id'] = alleles['fragment_id'].astype(np.int64)
fragments['fragment_id'] = fragments['fragment_id'].astype(np.int64)
alleles['fragment_id'] += index_offsets[chromosome]
fragments['fragment_id'] += index_offsets[chromosome]
index_offsets[chromosome] = max(alleles['fragment_id'].max(), fragments['fragment_id'].max()) + 1
out_store.append('/alleles/chromosome_{}'.format(chromosome), alleles)
out_store.append('/fragments/chromosome_{}'.format(chromosome), fragments)
store.close()
out_store.close()
def create_chromosome_seqdata(seqdata_filename, bam_filename, snp_filename, chromosome, max_fragment_length, max_soft_clipped, check_proper_pair):
""" Create seqdata from bam for one chromosome.
Args:
seqdata_filename(str): seqdata hdf store to write to
bam_filename(str): bam from which to extract read information
snp_filename(str): TSV chromosome, position file listing SNPs
chromosome(str): chromosome to extract
max_fragment_length(int): maximum length of fragments generating paired reads
max_soft_clipped(int): maximum soft clipping for considering a read concordant
        check_proper_pair(bool): check proper pair flag
"""
reader = remixt.bamreader.AlleleReader(
bam_filename,
snp_filename,
chromosome,
max_fragment_length,
max_soft_clipped,
check_proper_pair,
)
with pd.HDFStore(seqdata_filename, 'w', complevel=9, complib='zlib') as store:
while reader.ReadAlignments(10000000):
_unique_index_append(store, _get_key('fragments', chromosome), reader.GetFragmentTable())
_unique_index_append(store, _get_key('alleles', chromosome), reader.GetAlleleTable())
def create_seqdata(seqdata_filename, bam_filename, snp_filename, max_fragment_length, max_soft_clipped, check_proper_pair, tempdir, chromosomes):
try:
os.makedirs(tempdir)
except:
pass
all_seqdata = {}
for chrom in chromosomes:
chrom_seqdata = os.path.join(tempdir, "{}_seqdata.h5".format(chrom))
all_seqdata[chrom] = chrom_seqdata
create_chromosome_seqdata(
chrom_seqdata, bam_filename, snp_filename,
chrom, max_fragment_length, max_soft_clipped,
check_proper_pair
)
merge_seqdata(seqdata_filename, all_seqdata)
def merge_seqdata(out_filename, in_filenames):
""" Merge seqdata files for non-overlapping sets of chromosomes
Args:
out_filename(str): seqdata hdf store to write to
        in_filenames(dict): seqdata hdf stores to read from
"""
with pd.HDFStore(out_filename, 'w', complevel=9, complib='zlib') as out_store:
for in_filename in in_filenames.values():
with | pd.HDFStore(in_filename, 'r') | pandas.HDFStore |
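# --- Illustrative aside ---
# The merge helpers above rely on HDFStore's table format: put() creates a table the first
# time and append() extends it afterwards, which is what _unique_index_append wraps. A
# minimal sketch (requires the optional PyTables dependency; the file name is arbitrary):
import pandas as pd
chunk1 = pd.DataFrame({"fragment_id": [0, 1], "start": [100, 250]})
chunk2 = pd.DataFrame({"fragment_id": [2, 3], "start": [400, 900]})
with pd.HDFStore("example_seqdata.h5", "w", complevel=9, complib="zlib") as store:
    store.put("/fragments/chromosome_1", chunk1, format="table")
    store.append("/fragments/chromosome_1", chunk2)
    print(store.get_storer("/fragments/chromosome_1").nrows)  # 4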
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 18:48:55 2019
@author: shday
"""
import math
from collections import namedtuple
import pandas as pd
import numpy as np
from dateutil import parser
import dash_html_components as html
import plotly.express as px
import pytz
import plotly.graph_objects as go
import urllib
import string
#import pkuseg
from wordcloud import WordCloud
import json
import re
import ast
import os
# selected_features_tweetjs = ['created_at', 'favorite_count', 'favorited', 'id_str',
# 'in_reply_to_screen_name', 'in_reply_to_status_id_str', 'lang',
# 'retweet_count', 'retweeted', 'source', 'full_text']
pretty_weekday_dict = {0: 'Mon', 1: 'Tue', 2: 'Wed', 3: 'Thurs', 4: 'Fri', 5: 'Sat', 6: 'Sun'}
# load zh stopwords for wordcloud
# zh_stopword_list_url = 'https://raw.githubusercontent.com/stopwords-iso/stopwords-zh/master/stopwords-zh.txt'
# zh_stopwords = urllib.request.urlopen(zh_stopword_list_url).read().decode('utf-8').split('\n')
def parse_source_text(text):
return text.split('>')[1].split('<')[0]
def convert_tweet_json_to_df_row(json_data, selected_feature):
temp_list = []
for f in selected_feature:
if f == 'source':
cleaned_source = parse_source_text(json_data[f])
temp_list.append(cleaned_source)
elif f in ['friends_count', 'followers_count', 'protected']:
if 'user' in json_data:
temp_list.append(json_data['user'][f])
else:
temp_list.append(0) if f in ['friends_count', 'followers_count'] else temp_list.append(False)
elif f == 'created_at': # parsed at the display time instead
dt_object = parser.parse(json_data[f])
temp_list.append(dt_object)
# TODO: get the in reply to link working
# elif f == 'in_reply_to_status_id_str':
# if json_data[f] is not None:
# html_link = "https://twitter.com/"+json_data['in_reply_to_screen_name']+'/status/'+ json_data[f]
# temp_list.append(html_link)
# else:
# temp_list.append(None)
else:
temp_list.append(json_data[f]) if f in json_data else temp_list.append("")
return pd.DataFrame([temp_list], columns = selected_feature, index=[0])
def tweet2dt(json_list, selected_features):
for i in range(len(json_list)):
if i == 0:
processed_df = convert_tweet_json_to_df_row(json_list[i], selected_features)
else:
temp = convert_tweet_json_to_df_row(json_list[i], selected_features)
processed_df = processed_df.append(temp, ignore_index=True)
# remove duplicates
processed_df.drop_duplicates(inplace=True)
return processed_df
def extract_owner_from_json():
res = read_json_file(os.environ.get('PARSED_TWEETS_PATH', ''))
first_tweet = res[0]
if 'user' not in first_tweet: # from official twitter archive - only tweets objects are in it
user_info = {}
base_path, filename = os.path.split(os.environ.get('ACCOUNT_INFO_PATH', ''))
account_info = simple_parse_twitter_archive_json(base_path, filename)
user_info['screen_name'] = account_info['account']['username']
user_info['id_str'] = account_info['account']['accountId']
return user_info
else:
return first_tweet['user']
def retrieve_data_from_db(db_object, user_id):
query = "SELECT * FROM tweets where user_id = {};".format(user_id)
tweet_data = pd.read_sql(query, con=db_object.get_connection(), parse_dates=[1])
tweet_data['created_at'] = | pd.to_datetime(tweet_data['created_at'], errors='coerce') | pandas.to_datetime |
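# --- Illustrative aside ---
# A usage sketch for the helpers above with one fabricated tweet object (not real data):
# parse_source_text() strips the anchor tag Twitter puts in the "source" field, and
# tweet2dt() turns a list of such dicts into one DataFrame row per tweet.
fake_tweet = {
    "created_at": "Mon Apr 01 10:00:00 +0000 2019",
    "favorite_count": 2,
    "source": '<a href="https://twitter.com" rel="nofollow">Twitter Web App</a>',
    "full_text": "hello world",
}
print(parse_source_text(fake_tweet["source"]))  # -> 'Twitter Web App'
df_example = tweet2dt([fake_tweet], ["created_at", "favorite_count", "source", "full_text"])
print(df_example.shape)  # -> (1, 4)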
import numpy as np
import pandas as pd
def load(path):
df = pd.read_csv(path,
encoding="utf-8",
delimiter=";",
quotechar="'").rename(
columns={
"Text": "text",
"Label": "label"
})
train, dev, test = split_df(df, 'label', 0.8, 0.1, 0.1)
train_x = list(train["text"])
train_y_dummies = | pd.get_dummies(train["label"]) | pandas.get_dummies |
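# --- Illustrative aside ---
# split_df() is used above but not defined in this excerpt. Below is a hypothetical
# stand-in with the same signature, shown only to make the 0.8/0.1/0.1 split concrete;
# the real helper may shuffle or stratify on label_col differently.
import pandas as pd
def split_df(df, label_col, train_frac, dev_frac, test_frac):
    # label_col is accepted for API compatibility; this sketch does not stratify on it
    shuffled = df.sample(frac=1.0, random_state=0).reset_index(drop=True)
    n = len(shuffled)
    n_train = int(n * train_frac)
    n_dev = int(n * dev_frac)
    return (shuffled.iloc[:n_train],
            shuffled.iloc[n_train:n_train + n_dev],
            shuffled.iloc[n_train + n_dev:])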
# functions to run velocyto and scvelo
import numpy as np
import pandas as pd
# import velocyto as vcy
# import scvelo as scv
from scipy.sparse import csr_matrix
import matplotlib.pyplot as plt
from .moments import *
from anndata import AnnData
def vlm_to_adata(vlm, n_comps=30, basis="umap", trans_mats=None, cells_ixs=None):
""" Conversion function from the velocyto world to the dynamo world.
Code original from scSLAM-seq repository
Parameters
----------
vlm: VelocytoLoom Object
The VelocytoLoom object that will be converted into adata.
n_comps: `int` (default: 30)
        The number of principal components that will be stored.
basis: `str` (default: `umap`)
        The embedding that will be used to store the vlm.ts attribute. Note that velocyto doesn't usually use
        umap as the embedding, although `umap` is set as the default here for the convenience of dynamo itself.
trans_mats: None or dict
A dict of all relevant transition matrices
    cells_ixs: list of int
These are the indices of the subsampled cells
Returns
-------
adata: AnnData object
"""
from collections import OrderedDict
# set obs, var
obs, var = pd.DataFrame(vlm.ca), pd.DataFrame(vlm.ra)
if "CellID" in obs.keys():
obs["obs_names"] = obs.pop("CellID")
if "Gene" in var.keys():
var["var_names"] = var.pop("Gene")
if hasattr(vlm, "q"):
var["gamma_b"] = vlm.q
if hasattr(vlm, "gammas"):
var["gamma"] = vlm.gammas
if hasattr(vlm, "R2"):
var["gamma_r2"] = vlm.R2
# rename clusters to louvain
try:
ix = np.where(obs.columns == "Clusters")[0][0]
obs_names = list(obs.columns)
obs_names[ix] = "louvain"
obs.columns = obs_names
# make louvain a categorical field
obs["louvain"] = pd.Categorical(obs["louvain"])
except:
print("Could not find a filed 'Clusters' in vlm.ca.")
# set layers basics
layers = OrderedDict(
unspliced=csr_matrix(vlm.U.T),
spliced=csr_matrix(vlm.S.T),
velocity_S=csr_matrix(vlm.velocity.T),
)
# set X_spliced / X_unspliced
if hasattr(vlm, "S_norm"):
layers["X_spliced"] = csr_matrix(2**vlm.S_norm - 1).T
if hasattr(vlm, "U_norm"):
layers["X_unspliced"] = csr_matrix(2**vlm.U_norm - 1).T
if hasattr(vlm, "S_sz") and not hasattr(vlm, "S_norm"):
layers["X_spliced"] = csr_matrix(vlm.S_sz).T
if hasattr(vlm, "U_sz") and hasattr(vlm, "U_norm"):
layers["X_unspliced"] = csr_matrix(vlm.U_sz).T
# set M_s / M_u
if hasattr(vlm, "Sx"):
layers["M_s"] = csr_matrix(vlm.Sx).T
if hasattr(vlm, "Ux"):
layers["M_u"] = csr_matrix(vlm.Ux).T
if hasattr(vlm, "Sx_sz") and not hasattr(vlm, "Sx"):
layers["M_s"] = csr_matrix(vlm.Sx_sz).T
if hasattr(vlm, "Ux_sz") and hasattr(vlm, "Ux"):
layers["M_u"] = csr_matrix(vlm.Ux_sz).T
# set obsm
obsm = {}
obsm["X"] = vlm.pcs[:, : min(n_comps, vlm.pcs.shape[1])]
# set basis and velocity on the basis
if basis is not None:
obsm["X_" + basis] = vlm.ts
obsm["velocity_" + basis] = vlm.delta_embedding
# set transition matrix:
uns = {}
if hasattr(vlm, "corrcoef"):
uns["transition_matrix"] = vlm.corrcoef
if hasattr(vlm, "colorandum"):
uns["louvain_colors"] = list(np.unique(vlm.colorandum))
# add uns annotations
if trans_mats is not None:
for key, value in trans_mats.items():
uns[key] = trans_mats[key]
if cells_ixs is not None:
uns["cell_ixs"] = cells_ixs
if hasattr(vlm, "embedding_knn"):
from .connectivity import adj_to_knn
n_neighbors = np.unique((vlm.embedding_knn > 0).sum(1)).min()
ind_mat, dist_mat = adj_to_knn(
            vlm.embedding_knn, n_neighbors
)
uns["neighbors"] = {"indices": ind_mat}
        obsp = {'distances': dist_mat, "connectivities": vlm.embedding_knn}
uns["dynamics"] = {
"filter_gene_mode": None,
"t": None,
"group": None,
"X_data": None,
"X_fit_data": None,
"asspt_mRNA": "ss",
"experiment_type": "conventional",
"normalized": True,
"model": "deterministic",
"est_method": "ols",
"has_splicing": True,
"has_labeling": False,
"has_protein": False,
"use_smoothed": True,
"NTR_vel": False,
"log_unnormalized": True,
}
# set X
if hasattr(vlm, "S_norm"):
X = csr_matrix(vlm.S_norm.T)
else:
X = csr_matrix(vlm.S_sz.T) if hasattr(vlm, "S_sz") else csr_matrix(vlm.S.T)
# create an anndata object with Dynamo characteristics.
dyn_adata = AnnData(X=X, obs=obs, obsp=obsp, obsm=obsm, var=var, layers=layers, uns=uns)
return dyn_adata
def converter(data_in, from_type="adata", to_type="vlm", dir="."):
"""
convert adata to loom object
- we may save_fig to a temp directory automatically
- we may write a on-the-fly converter which doesn't involve saving and reading files
"""
if from_type == "adata":
if to_type == "vlm":
file = dir + "/data.loom"
data_in.write_loom(file)
data_out = vcy.VelocytoLoom(file)
elif from_type == "vlm":
if to_type == "adata":
            data_out = vlm_to_adata(data_in)
data_out.ra["Gene"] = data_out.ra["var_names"] # required by plot_phase_portraits
colors20 = np.vstack(
(
plt.cm.tab20b(np.linspace(0.0, 1, 20))[::2],
plt.cm.tab20c(np.linspace(0, 1, 20))[1::2],
)
)
def colormap_fun(x: np.ndarray) -> np.ndarray:
return colors20[np.mod(x, 20)]
data_out.colorandum = colormap_fun([1] * data_out.S.shape[1])
return data_out
def run_velocyto(adata):
"""
1. convert adata to vlm data
2. set up PCA, UMAP, etc.
3. estimate the gamma parameter
"""
vlm = converter(adata)
# U_norm: log2(U_sz + pcount)
# vlm.U_sz: norm_factor * U
# S_norm: log2(S_sz + pcount)
# vlm.S_sz norm_factor * S
# vlm.Ux: smoothed unspliced
# vlm.Sx: smoothed spliced
# vlm.Ux_sz: smoothed unspliced -- old code
# vlm.Sx_sz: smoothed spliced -- old code
vlm.normalize() # add U_norm, U_sz, S_norm, S_sz
vlm.perform_PCA()
vlm.knn_imputation() # Ux, Sx, Ux_sz, Sx_sz
vlm.pcs = adata.X # pcs: cell x npcs ndarray
# vlm.Sx = vlm.S_sz
# vlm.Ux = vlm.U_sz
# vlm.Sx_sz = vlm.S_sz
# vlm.Ux_sz = vlm.U_sz
# gamma fit
vlm.fit_gammas() # limit_gamma = False, fit_offset = True, use_imputed_data = False, use_size_norm = False
# estimate velocity
vlm.predict_U()
vlm.calculate_velocity()
# predict future state after dt
vlm.calculate_shift() # assumption = 'constant_velocity'
vlm.extrapolate_cell_at_t() # delta_t = 1.
return vlm
def run_scvelo(adata):
"""
1. set up PCA, UMAP, etc.
2. estimate gamma and all other parameters
3. return results (adata.var['velocity_gamma'])
"""
# scv.pp.filter_and_normalize(adata, min_counts=2, min_counts_u=1, n_top_genes=3000)
scv.pp.moments(adata) # , n_pcs = 12, n_neighbors = 15, mode = 'distances'
scv.tl.velocity(adata)
scv.tl.velocity_graph(adata)
# how to fit other parameters, beta, etc.?
return adata
def mean_var_by_time(X, Time):
import pandas as pd
exp_data = pd.DataFrame(X)
exp_data["Time"] = Time
mean_val = exp_data.groupby(["Time"]).mean()
var_val = exp_data.groupby(["Time"]).var()
return mean_val.values, var_val.values
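# --- Illustrative aside ---
# A quick usage sketch for mean_var_by_time() above with toy values: expression rows are
# grouped by their time label, and per-time mean and variance matrices come back.
import numpy as np
X_toy = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
Time_toy = np.array([0, 0, 1, 1])
mean_toy, var_toy = mean_var_by_time(X_toy, Time_toy)
print(mean_toy)
# [[2. 3.]
#  [6. 7.]]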
def run_dynamo(adata, normalize=True, init_num=1, sample_method="lhs"):
time = adata.obs["Step"].values
uniqe_time = list(set(time))
gene_num = adata.X.shape[1]
# prepare data
import numpy as np
x_data = np.zeros((8, len(uniqe_time), gene_num)) # use unique time
uu, ul, su, sl = (
adata.layers["uu"].toarray(),
adata.layers["ul"].toarray(),
adata.layers["su"].toarray(),
adata.layers["sl"].toarray(),
)
uu = np.log2(uu + 1) if normalize else uu
ul = np.log2(ul + 1) if normalize else ul
su = np.log2(su + 1) if normalize else su
sl = np.log2(sl + 1) if normalize else sl
x_data[0], x_data[4] = mean_var_by_time(uu, time)
x_data[1], x_data[5] = mean_var_by_time(ul, time)
x_data[2], x_data[6] = mean_var_by_time(su, time)
x_data[3], x_data[7] = mean_var_by_time(sl, time)
# estimation all parameters
p0_range = {
"a": [0, 1],
"b": [0, 1],
"la": [0, 1],
"alpha_a": [10, 1000],
"alpha_i": [0, 10],
"sigma": [0, 1],
"beta": [0, 10],
"gamma": [0, 10],
}
estm = estimation(list(p0_range.values()))
param_out = pd.DataFrame(
index=adata.var.index,
columns=["a", "b", "la", "alpha_a", "alpha_i", "sigma", "beta", "gamma"],
)
for i in range(gene_num):
cur_x_data = x_data[:, :, i].squeeze()
param_out.iloc[i, :], cost = estm.fit_lsq(
uniqe_time, cur_x_data, p0=None, n_p0=init_num, sample_method=sample_method
)
# estimate only on the spliced and unspliced dataset
# estimate on the labeled and unlabeled dataset
# store the fitting result in adata.uns
adata.uns.update({"dynamo": param_out})
return adata
def run_dynamo_simple_fit(adata, log=True):
ncells, gene_num = adata.X.shape
# estimation all parameters
param_out = pd.DataFrame(index=adata.var.index, columns=["alpha", "gamma"])
u, s = adata.layers["unspliced"], adata.layers["spliced"]
velocity_u, velocity_s = u, s
for i in range(gene_num):
cur_u, cur_s = u[:, i], s[:, i]
gamma = fit_gamma(cur_u.toarray().squeeze(), cur_s.toarray().squeeze())
alpha = np.mean(cur_s)
velocity_u[:, i] = cur_u - cur_s * gamma
velocity_s[:, i] = cur_s / (1 - np.exp(-1)) - cur_u
param_out.iloc[i, :] = [alpha, gamma]
adata.layers["velocity_u"] = velocity_u
adata.layers["velocity_s"] = velocity_s
adata.uns.update({"dynamo_simple_fit": param_out})
return adata
def run_dynamo_labelling(adata, log=True, group=False):
ncells, gene_num = adata.X.shape
# estimation all parameters
T = adata.obs["Time"]
groups = [""] if group == False else np.unique(adata.obs[group])
param_out = pd.DataFrame(
index=adata.var.index,
columns=[i + "_" + j for j in groups for i in ["alpha", "gamma", "u0", "l0"]],
)
L, U = adata.layers["L"], adata.layers["U"]
velocity_u, velocity_s = L, U
for i in range(gene_num):
all_parm = []
for cur_grp in groups.tolist():
cur_L, cur_U = (
(L[:, i], U[:, i])
if cur_grp == ""
else (
L[adata.obs[group] == cur_grp, i],
U[adata.obs[group] == cur_grp, i],
)
)
if log:
cur_U, cur_L = (
np.log(cur_U.toarray().squeeze() + 1),
np.log(cur_L.toarray().squeeze() + 1),
)
else:
cur_U, cur_L = cur_U.toarray().squeeze(), cur_L.toarray().squeeze()
gamma, l0 = fit_gamma_labelling(T, cur_L, mode=None)
alpha, u0 = fit_alpha_labelling(T, cur_U, gamma, mode=None)
tmp = [alpha, gamma, u0, l0]
all_parm.extend(tmp)
velocity_u[:, i] = (cur_L - cur_U * gamma)[:, None]
velocity_s[:, i] = (cur_U / (1 - np.exp(-1)) - cur_L)[:, None]
adata.layers[cur_grp + "velocity_u"] = velocity_u
adata.layers[cur_grp + "velocity_s"] = velocity_s
param_out.iloc[i, :] = all_parm
adata.uns.update({"dynamo_labelling": param_out})
return adata
def compare_res(
adata,
velocyto_res,
svelo_res,
dynamo_res,
a_val,
b_val,
la_val,
alpha_a_val,
alpha_i_val,
sigma_val,
beta_val,
gamma_val,
):
"""
function to compare results from velocyto and scvelo with our new method
0. retrieve gamm or gamma with other parameters from velocyto result or scvelo
1. plot the correlation between parameters estimated with different methods
2. calculate the correltion between those parameters
"""
# self._offset, self._offset2, self._beta, self._gamma, self._r2, self._velocity_genes
velocyto_gammas = velocyto_res.gammas
scvelo_gammas = svelo_res.var["velocity_gamma"]
# scatter plot the true gammas with our result
plt.subplots(figsize=(15, 5))
plt.plot()
plt.subplot(131)
plt.plot(gamma_val, velocyto_gammas, "o")
plt.xlabel(r"True $\gamma$")
plt.ylabel(r"$\gamma$ (velocyto)")
plt.subplot(132)
plt.plot(gamma_val, scvelo_gammas, "o")
plt.xlabel(r"True $\gamma$")
plt.ylabel(r"$\gamma$ (scvelo)")
plt.subplot(133)
plt.plot(gamma_val, dynamo_res.uns["dynamo"]["gamma"], "o")
plt.xlabel(r"True $\gamma$")
plt.ylabel(r"$\gamma$ (dynamo)")
# what if we only have a small number of parameters?
plt.subplots(figsize=(15, 5))
plt.plot()
plt.subplot(131)
plt.plot(alpha_a_val, svelo_res.var["fit_alpha"], "o")
plt.xlabel(r"True alpha")
plt.ylabel(r"$\alpha$ (scvelo)")
plt.subplot(132)
plt.plot(beta_val, svelo_res.var["fit_beta"], "o")
plt.xlabel(r"True $\beta$")
plt.ylabel(r"$\beta$ (scvelo)")
plt.subplot(133)
plt.plot(gamma_val, svelo_res.var["fit_gamma"], "o")
plt.xlabel(r"True $\gamma$")
plt.ylabel(r"$\gamma$ (scvelo)")
# param_out = pd.DataFrame(index=adata.var.index, columns=['a', 'b', 'la', 'alpha_a', 'alpha_i', 'sigma', 'beta', 'gamma'])
# what if we only have a small number of parameters?
plt.subplots(figsize=(15, 15))
plt.subplot(331)
plt.plot(a_val, adata.uns["dynamo"]["a"], "o")
plt.xlabel(r"True $a$")
plt.ylabel(r"$a$ (dynamo)")
plt.subplot(332)
plt.plot(b_val, adata.uns["dynamo"]["b"], "o")
plt.xlabel(r"True $b$")
plt.ylabel(r"$b$ (dynamo)")
plt.subplot(333)
plt.plot(la_val, adata.uns["dynamo"]["la"], "o")
plt.xlabel(r"True $l_a$")
plt.ylabel(r"$l_a$ (dynamo)")
plt.subplot(334)
plt.plot(alpha_a_val, adata.uns["dynamo"]["alpha_a"], "o")
plt.xlabel(r"True $\alpha_a$")
plt.ylabel(r"$\alpha_a$ (dynamo)")
plt.subplot(335)
plt.plot(alpha_i_val, adata.uns["dynamo"]["alpha_i"], "o")
plt.xlabel(r"True $\alpha_i$")
plt.ylabel(r"$\alpha_i$ (dynamo)")
plt.subplot(336)
plt.plot(sigma_val, adata.uns["dynamo"]["sigma"], "o")
plt.xlabel(r"True $\sigma$")
plt.ylabel(r"$\sigma$ (dynamo)")
plt.subplot(337)
plt.plot(beta_val, adata.uns["dynamo"]["beta"], "o")
plt.xlabel(r"True $\beta$")
plt.ylabel(r"$\beta$ (dynamo)")
plt.subplot(338)
plt.plot(gamma_val, adata.uns["dynamo"]["gamma"], "o")
plt.xlabel(r"True $\gamma$")
plt.ylabel(r"$\gamma$ (dynamo)")
velocyto_coef = {"gamma": np.corrcoef(gamma_val, velocyto_gammas)[1, 0]}
scvelo_coef = {
"alpha": np.corrcoef(alpha_a_val, svelo_res.var["fit_alpha"])[1, 0],
"beta": np.corrcoef(beta_val, svelo_res.var["fit_beta"])[1, 0],
"gamma": np.corrcoef(gamma_val, svelo_res.var["fit_gamma"])[1, 0],
}
dynamo_coef = {
"a": np.corrcoef(a_val, list(dynamo_res.uns["dynamo"]["a"]))[1, 0],
"b": np.corrcoef(b_val, list(dynamo_res.uns["dynamo"]["b"]))[1, 0],
"la": np.corrcoef(la_val, list(dynamo_res.uns["dynamo"]["la"]))[1, 0],
"alpha_a": np.corrcoef(alpha_a_val, list(dynamo_res.uns["dynamo"]["alpha_a"]))[
1, 0
],
"alpha_i": np.corrcoef(alpha_i_val, list(dynamo_res.uns["dynamo"]["alpha_i"]))[
1, 0
],
"sigma": np.corrcoef(sigma_val, list(dynamo_res.uns["dynamo"]["sigma"]))[1, 0],
"beta": np.corrcoef(beta_val, list(dynamo_res.uns["dynamo"]["beta"]))[1, 0],
"gamma": np.corrcoef(gamma_val, list(dynamo_res.uns["dynamo"]["gamma"]))[1, 0],
}
return {
"velocyto": pd.DataFrame.from_dict(velocyto_coef, orient="index").T,
"scvelo": | pd.DataFrame.from_dict(scvelo_coef, orient="index") | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/30 11:31
Desc: Stock data - overview - market overview
Stock data - overview - trading summary
http://www.szse.cn/market/overview/index.html
http://www.sse.com.cn/market/stockdata/statistic/
"""
import warnings
from io import BytesIO
from akshare.utils import demjson
import pandas as pd
import requests
warnings.filterwarnings('ignore')
def stock_szse_summary(date: str = "20200619") -> pd.DataFrame:
"""
    Shenzhen Stock Exchange - market overview
    http://www.szse.cn/market/overview/index.html
    :param date: most recent completed trading day
    :type date: str
    :return: Shenzhen Stock Exchange - market overview
:rtype: pandas.DataFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab1",
"txtQueryDate": "-".join([date[:4], date[4:6], date[6:]]),
"random": "0.39339437497296137",
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(BytesIO(r.content))
temp_df["证券类别"] = temp_df["证券类别"].str.strip()
temp_df.iloc[:, 2:] = temp_df.iloc[:, 2:].applymap(lambda x: x.replace(",", ""))
temp_df.columns = [
'证券类别',
'数量',
'成交金额',
'成交量',
'总股本',
'总市值',
'流通股本',
'流通市值']
temp_df['数量'] = pd.to_numeric(temp_df['数量'])
temp_df['成交金额'] = pd.to_numeric(temp_df['成交金额'])
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'])
temp_df['总股本'] = pd.to_numeric(temp_df['总股本'], errors="coerce")
temp_df['总市值'] = pd.to_numeric(temp_df['总市值'], errors="coerce")
temp_df['流通股本'] = pd.to_numeric(temp_df['流通股本'], errors="coerce")
temp_df['流通市值'] = pd.to_numeric(temp_df['流通市值'], errors="coerce")
return temp_df
def stock_sse_summary() -> pd.DataFrame:
"""
    Shanghai Stock Exchange - market overview
    http://www.sse.com.cn/market/stockdata/statistic/
    :return: Shanghai Stock Exchange - market overview
:rtype: pandas.DataFrame
"""
url = "http://query.sse.com.cn/commonQuery.do"
params = {
'sqlId': 'COMMON_SSE_SJ_GPSJ_GPSJZM_TJSJ_L',
'PRODUCT_NAME': '股票,主板,科创板',
'type': 'inParams',
'_': '1640855495128',
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
data_json.keys()
temp_df = pd.DataFrame(data_json['result']).T
temp_df.reset_index(inplace=True)
temp_df['index'] = [
"流通股本",
"总市值",
"平均市盈率",
"上市公司",
"上市股票",
"流通市值",
"报告时间",
"-",
"总股本",
"项目",
]
temp_df = temp_df[temp_df['index'] != '-'].iloc[:-1, :]
temp_df.columns = [
'项目',
'股票',
'科创板',
'主板',
]
return temp_df
def stock_sse_deal_daily(date: str = "20220225") -> pd.DataFrame:
"""
    Shanghai Stock Exchange - data - stock data - trading overview - stock trading overview - daily stock summary
    http://www.sse.com.cn/market/stockdata/overview/day/
    :return: daily stock summary
:rtype: pandas.DataFrame
"""
if int(date) <= 20211224:
url = "http://query.sse.com.cn/commonQuery.do"
params = {
"searchDate": "-".join([date[:4], date[4:6], date[6:]]),
"sqlId": "COMMON_SSE_SJ_GPSJ_CJGK_DAYCJGK_C",
"stockType": "90",
"_": "1616744620492",
}
headers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"])
temp_df = temp_df.T
temp_df.reset_index(inplace=True)
temp_df.columns = [
"单日情况",
"主板A",
"股票",
"主板B",
"_",
"股票回购",
"科创板",
]
temp_df = temp_df[
[
"单日情况",
"股票",
"主板A",
"主板B",
"科创板",
"股票回购",
]
]
temp_df["单日情况"] = [
"流通市值",
"流通换手率",
"平均市盈率",
"_",
"市价总值",
"_",
"换手率",
"_",
"挂牌数",
"_",
"_",
"_",
"_",
"_",
"成交笔数",
"成交金额",
"成交量",
"次新股换手率",
"_",
"_",
]
temp_df = temp_df[temp_df["单日情况"] != "_"]
temp_df["单日情况"] = temp_df["单日情况"].astype("category")
list_custom_new = [
"挂牌数",
"市价总值",
"流通市值",
"成交金额",
"成交量",
"成交笔数",
"平均市盈率",
"换手率",
"次新股换手率",
"流通换手率",
]
temp_df["单日情况"].cat.set_categories(list_custom_new)
temp_df.sort_values("单日情况", ascending=True, inplace=True)
temp_df.reset_index(drop=True, inplace=True)
temp_df['股票'] = pd.to_numeric(temp_df['股票'], errors="coerce")
temp_df['主板A'] = pd.to_numeric(temp_df['主板A'], errors="coerce")
        temp_df['主板B'] = | pd.to_numeric(temp_df['主板B'], errors="coerce") | pandas.to_numeric |
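# --- Illustrative aside ---
# The cleaning steps above strip thousands separators and then coerce text columns to
# numbers; errors="coerce" turns anything non-numeric into NaN instead of raising.
# A tiny standalone sketch of that pattern:
import pandas as pd
raw = pd.Series(["1,234", "56.7", "-"])
cleaned = pd.to_numeric(raw.str.replace(",", "", regex=False), errors="coerce")
print(cleaned.tolist())  # [1234.0, 56.7, nan]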
import numpy as np
import pytest
from pandas import DataFrame, Series, concat, isna, notna
import pandas._testing as tm
import pandas.tseries.offsets as offsets
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_series(series, compare_func, roll_func, kwargs):
result = getattr(series.rolling(50), roll_func)(**kwargs)
assert isinstance(result, Series)
tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_frame(raw, frame, compare_func, roll_func, kwargs):
result = getattr(frame.rolling(50), roll_func)(**kwargs)
assert isinstance(result, DataFrame)
tm.assert_series_equal(
result.iloc[-1, :],
frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_series(series, compare_func, roll_func, kwargs, minp):
win = 25
ser = series[::2].resample("B").mean()
series_result = getattr(ser.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_frame(raw, frame, compare_func, roll_func, kwargs, minp):
win = 25
frm = frame[::2].resample("B").mean()
frame_result = getattr(frm.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = frame_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(compare_func, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_nans(compare_func, roll_func, kwargs):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(50, min_periods=30), roll_func)(**kwargs)
tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
# min_periods is working correctly
result = getattr(obj.rolling(20, min_periods=15), roll_func)(**kwargs)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(np.random.randn(20))
result = getattr(obj2.rolling(10, min_periods=5), roll_func)(**kwargs)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if roll_func != "sum":
result0 = getattr(obj.rolling(20, min_periods=0), roll_func)(**kwargs)
result1 = getattr(obj.rolling(20, min_periods=1), roll_func)(**kwargs)
| tm.assert_almost_equal(result0, result1) | pandas._testing.assert_almost_equal |
#!/usr/bin/env python3
import sys
sys.stderr = open(snakemake.log[0], "w")
import numpy as np
import pandas as pd
import allel
chroms = snakemake.params['chroms']
for chrom in chroms:
vcf = allel.read_vcf(f"results/variants/vcfs/annot.missense.{chrom}.vcf")
pos = vcf['variants/POS']
pos1 = pos+1
data = {'chrom':chrom,
'start':pos,
'stop':pos1}
bed = | pd.DataFrame(data) | pandas.DataFrame |
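    # Possible next step (sketch; the output path is an assumption, not from the
    # source): write this chromosome's intervals as a headerless, tab-separated BED.
    bed.to_csv(f"results/variants/missense.{chrom}.bed", sep="\t", header=False, index=False)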
import unittest
import pandas as pd
from data_profiler.profilers import OrderColumn
from . import test_utils
from unittest.mock import patch, MagicMock
from collections import defaultdict
# This is taken from: https://github.com/rlworkgroup/dowel/pull/36/files
# undo when cpython#4800 is merged.
unittest.case._AssertWarnsContext.__enter__ = test_utils.patched_assert_warns
class TestOrderColumn(unittest.TestCase):
@staticmethod
def _update_order(data):
df = pd.Series(data).apply(str)
profiler = OrderColumn(df.name)
profiler.update(df)
return profiler.order
def test_base_case(self):
data = pd.Series([], dtype=object)
profiler = OrderColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.sample_size, 0)
self.assertIsNone(profiler.order)
def test_descending(self):
data = ['za', 'z', 'c', 'a']
order = self._update_order(data)
self.assertEqual(order, 'descending')
data = [5, 3, 2]
order = self._update_order(data)
self.assertEqual(order, 'descending')
def test_ascending(self):
data = ['a', 'b', 'z', 'za']
order = self._update_order(data)
self.assertEqual(order, 'ascending')
data = [2, 3, 11]
order = self._update_order(data)
self.assertEqual(order, 'ascending')
def test_constant_value(self):
data = ['a']
order = self._update_order(data)
self.assertEqual(order, 'constant value')
data = ['a', 'a', 'a', 'a', 'a']
order = self._update_order(data)
self.assertEqual(order, 'constant value')
def test_random(self):
data = ['a', 'b', 'ab']
order = self._update_order(data)
self.assertEqual(order, 'random')
data = [1, 11, 4]
order = self._update_order(data)
self.assertEqual(order, 'random')
def test_batch_updates(self):
data = ['a', 'a', 'a']
df = pd.Series(data)
profiler = OrderColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.order, 'constant value')
data = ['a', 'b', 'c']
df = pd.Series(data)
profiler.update(df)
self.assertEqual(profiler.order, 'ascending')
# previous was ascending, should stay ascending bc now receiving const
data = ['c', 'c', 'c']
df = pd.Series(data)
profiler.update(df)
self.assertEqual(profiler.order, 'ascending')
# previous was ascending, should be random now receiving descending
data = ['c', 'b', 'a']
df = pd.Series(data)
profiler.update(df)
self.assertEqual(profiler.order, 'random')
def test_profile(self):
data = [1]
df = pd.Series(data).apply(str)
profiler = OrderColumn(df.name)
expected_profile = dict(
order='constant value',
times={'order' : 2.0}
)
time_array = [float(x) for x in range(4, 0, -1)]
with patch('time.time', side_effect = lambda: time_array.pop()):
profiler.update(df)
profile = profiler.profile
# key and value populated correctly
self.assertDictEqual(expected_profile, profile)
def test_profile_merge(self):
data = [1, 2, 3, 4, 5, 6]
df = pd.Series(data).apply(str)
profiler = OrderColumn("placeholder_name")
profiler.update(df)
data2 = [7, 8, 9, 10]
df2 = pd.Series(data2).apply(str)
profiler2 = OrderColumn("placeholder_name")
profiler2.update(df2)
data3 = [2, 3, 4]
df3 = | pd.Series(data3) | pandas.Series |
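        # Sketch of a likely continuation (assumptions: df3 is stringified like the
        # other inputs and column profilers merge via the + operator):
        profiler3 = OrderColumn("placeholder_name")
        profiler3.update(df3.apply(str))
        self.assertEqual(profiler3.order, 'ascending')
        merged_profiler = profiler + profiler2
        self.assertEqual(merged_profiler.order, 'ascending')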
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
# nan and None mix are result in mixed
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not | lib.is_datetime64_array(arr) | pandas._libs.lib.is_datetime64_array |
import numpy as np
import pandas as pd
import csv
import os
from datetime import datetime
class Console_export(object):
def __init__(self, path):
self.path = path + "_sim_summary.txt"
def printLog(self, *args, **kwargs):
print(*args, **kwargs)
with open(self.path,'a') as file:
print(*args, **kwargs, file=file)
def export_statistics_logging(statistics, parameters, resources):
if parameters['EXPORT_NO_LOGS']: return None
statistics['sim_end_time'] = datetime.now()
path = parameters['PATH_TIME']
ce = Console_export(path)
ce.printLog("Start logger ", datetime.now())
"""
Statistics & Logging
"""
# Cut-off last processes at end of simulation
for mach in range(parameters['NUM_MACHINES']):
list_of_stats = ['stat_machines_working', 'stat_machines_changeover', 'stat_machines_broken',
'stat_machines_idle']
for stat in list_of_stats:
if stat == 'stat_machines_working':
if resources['machines'][mach].last_process_start > statistics['time_end']:
resources['machines'][mach].last_process_start -= resources['machines'][mach].last_broken_time
if resources['machines'][mach].last_process_start + resources['machines'][mach].last_process_time > statistics['time_end']:
statistics[stat][mach] -= resources['machines'][mach].last_process_start + resources['machines'][mach].last_process_time - statistics['time_end']
if stat == 'stat_machines_broken':
if resources['machines'][mach].last_broken_start + resources['machines'][mach].last_broken_time > statistics['time_end']:
statistics[stat][mach] -= resources['machines'][mach].last_broken_start + resources['machines'][mach].last_broken_time - statistics['time_end']
statistics['stat_machines_working'] = np.true_divide(statistics['stat_machines_working'], statistics['time_end'])
statistics['stat_machines_changeover'] = np.true_divide(statistics['stat_machines_changeover'], statistics['time_end'])
statistics['stat_machines_broken'] = np.true_divide(statistics['stat_machines_broken'], statistics['time_end'])
statistics['stat_machines_idle'] = np.true_divide(statistics['stat_machines_idle'], statistics['time_end'])
statistics['stat_transp_working'] = np.true_divide(statistics['stat_transp_working'], statistics['time_end'])
statistics['stat_transp_walking'] = np.true_divide(statistics['stat_transp_walking'], statistics['time_end'])
statistics['stat_transp_handling'] = np.true_divide(statistics['stat_transp_handling'], statistics['time_end'])
statistics['stat_transp_idle'] = np.true_divide(statistics['stat_transp_idle'], statistics['time_end'])
ce.printLog("##########################")
ce.printLog("Simulation")
ce.printLog("##########################")
ce.printLog("Start time: ", statistics['sim_start_time'])
ce.printLog("End time: ", statistics['sim_end_time'])
duration = statistics['sim_end_time'] - statistics['sim_start_time']
ce.printLog("Duration [min]: ", duration.total_seconds() / 60.0)
ce.printLog("##########################")
ce.printLog("Orders")
ce.printLog("##########################")
ce.printLog('Finished orders: ', len(statistics['orders_done']))
ce.printLog('Prefilled orders: ', statistics['stat_prefilled_orders'])
cycle_time = 0.0
for order in statistics['orders_done']:
cycle_time += order.eop - order.sop
ce.printLog('Average order cycle time: ', cycle_time / len(statistics['orders_done']))
ce.printLog("##########################")
ce.printLog("Maschines")
ce.printLog("##########################")
ce.printLog("Working - Changeover - Broken - Idle || Total")
for i in range(parameters['NUM_MACHINES']):
ce.printLog("{0:.3f}".format(statistics['stat_machines_working'][i]), "{0:.3f}".format(statistics['stat_machines_changeover'][i]), "{0:.3f}".format(statistics['stat_machines_broken'][i]), "{0:.3f}".format(statistics['stat_machines_idle'][i]), " || ",
"{0:.3f}".format(statistics['stat_machines_working'][i]+statistics['stat_machines_changeover'][i]+statistics['stat_machines_broken'][i]+statistics['stat_machines_idle'][i]))
ce.printLog("--------------------------")
ce.printLog("{0:.3f}".format(np.mean(statistics['stat_machines_working'])), "{0:.3f}".format(np.mean(statistics['stat_machines_changeover'])), "{0:.3f}".format(np.mean(statistics['stat_machines_broken'])), "{0:.3f}".format(np.mean(statistics['stat_machines_idle'])), " || ",
"{0:.3f}".format(np.mean(statistics['stat_machines_working']) + np.mean(statistics['stat_machines_changeover']) + np.mean(statistics['stat_machines_broken']) + np.mean(statistics['stat_machines_idle'])))
ce.printLog("##########################")
ce.printLog("Transport")
ce.printLog("##########################")
ce.printLog("Working - Walking - Handling - Idle || Total")
for i in range(parameters['NUM_TRANSP_AGENTS']):
ce.printLog("{0:.3f}".format(statistics['stat_transp_working'][i]), "{0:.3f}".format(statistics['stat_transp_walking'][i]), "{0:.3f}".format(statistics['stat_transp_handling'][i]), "{0:.3f}".format(statistics['stat_transp_idle'][i]), " || ",
"{0:.3f}".format(statistics['stat_transp_walking'][i]+statistics['stat_transp_handling'][i]+statistics['stat_transp_idle'][i]))
ce.printLog("--------------------------")
ce.printLog("{0:.3f}".format(np.mean(statistics['stat_transp_working'])), "{0:.3f}".format(np.mean(statistics['stat_transp_walking'])), "{0:.3f}".format(np.mean(statistics['stat_transp_handling'])), "{0:.3f}".format(np.mean(statistics['stat_transp_idle'])), " || ",
"{0:.3f}".format(np.mean(statistics['stat_transp_walking']) + np.mean(statistics['stat_transp_handling']) + np.mean(statistics['stat_transp_idle'])))
ce.printLog("##########################")
# Close report file
statistics['agent_reward_log'].close()
statistics['episode_log'].close()
# Calculate statistics of last quarter
pd_episode_log = pd.read_csv(parameters['PATH_TIME'] + "_episode_log.txt", sep=",", header=0, index_col=0)
last_quarter = int(len(pd_episode_log.index) / 4)
dt_weights_time = pd_episode_log['dt'].tail(last_quarter).tolist()
dt_weights_orders = pd_episode_log['finished_orders'].tail(last_quarter).tolist()
lq_stats = dict()
for kpi in pd_episode_log.columns:
if kpi in ['dt', 'dt_real_time', 'valid_actions', 'total_reward', 'machines_total', 'selected_idle', 'forced_idle', 'threshold_waiting', 'finished_orders', 'processed_orders']:
lq_stats.update({kpi: np.average(pd_episode_log[kpi].tail(last_quarter).tolist())})
elif kpi in ['machines_working', 'machines_changeover', 'machines_broken', 'machines_idle', 'machines_processed_orders', 'transp_working', 'transp_walking', 'transp_handling', 'transp_idle', 'alpha', 'inventory']:
lq_stats.update({kpi: np.average(pd_episode_log[kpi].tail(last_quarter).tolist(), weights=dt_weights_time)})
elif kpi in ['order_waiting_time']:
lq_stats.update({kpi: np.average(pd_episode_log[kpi].tail(last_quarter).tolist(), weights=dt_weights_orders)})
else:
lq_stats.update({kpi: 0.0})
pd.DataFrame.from_dict(lq_stats, orient="index").to_csv(parameters['PATH_TIME'] + "_kpi_log.txt", sep=",", header=0)
ce.printLog("Export order log ", datetime.now())
export_df = []
for x in statistics['orders_done']:
export_df.append(x.order_log)
pd.DataFrame(export_df).to_csv(str(path) + '_order_log.txt', header=None, index=None, sep=',', mode='a')
ce.printLog("Export transport log ", datetime.now())
    export_df = pd.DataFrame()
for x in resources['transps']:
temp_df = pd.DataFrame(x.transp_log)
new_header = temp_df.iloc[0]
temp_df = temp_df[1:]
temp_df.columns = new_header
temp_df = temp_df.add_prefix("transp_" + str(x.id) + "_")
export_df = pd.concat([export_df, temp_df], axis=1)
export_df.to_csv(str(path) + '_transport_log.txt', index=None, sep=',', mode='a')
ce.printLog("Export machine log ", datetime.now())
    export_df = pd.DataFrame()
for x in resources['machines']:
temp_df = pd.DataFrame(x.machine_log)
new_header = temp_df.iloc[0]
temp_df = temp_df[1:]
temp_df.columns = new_header
temp_df = temp_df.add_prefix("machine_" + str(x.id) + "_")
export_df = | pd.concat([export_df, temp_df], axis=1) | pandas.concat |
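    # Likely continuation (sketch, mirroring the transport-log export above; the
    # file name is an assumption based on the "Export machine log" message):
    export_df.to_csv(str(path) + '_machine_log.txt', index=None, sep=',', mode='a')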
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for holiday_effects."""
from absl.testing import parameterized
import pandas as pd
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.sts import holiday_effects
HOLIDAY_FILE_FIELDS = ['geo', 'holiday', 'date']
class HolidayEffectsTest(test_util.TestCase):
def test_get_default_holidays_invalid_country(self):
times = | pd.to_datetime(['2012-12-25', '2013-01-01']) | pandas.to_datetime |
import datetime as dt
import pandas as pd
import numpy as np
import re
# Begin User Input Data
report_date = dt.datetime(2020, 8, 31)
wscf_market_value = 194719540.46
aqr_market_value = 182239774.63
delaware_market_value = 151551731.17
wellington_market_value = 149215529.22
qic_cash_market_value = 677011299.30
input_directory = 'U:/'
output_directory = 'U:/'
jpm_filepath = input_directory + 'CIO/#Data/input/jpm/holdings/2020/08/Priced Positions - All.csv'
wscf_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/wscf_holdings.xlsx'
aqr_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/aqr_holdings.xls'
delaware_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/delaware_holdings.xlsx'
wellington_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/wellington_holdings.xlsx'
qic_cash_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/07/qic_cash_holdings.xlsx'
tickers_filepath = input_directory + 'CIO/#Holdings/Data/input/tickers/tickers_201909.xlsx'
asx_filepath = input_directory + 'CIO/#Data/input/asx/ASX300/20200501-asx300.csv'
aeq_filepath = input_directory + 'CIO/#Holdings/Data/input/exclusions/LGS Exclusions List_December 2018_AEQ_Manager Version.xlsx'
ieq_filepath = input_directory + 'CIO/#Holdings/Data/input/exclusions/LGS Exclusions List_December 2018_IEQ_Manager Version.xlsx'
aeq_exclusions_filepath = input_directory + 'CIO/#Holdings/Data/output/exclusions/aeq_exclusions_' + str(report_date.date()) + '.csv'
ieq_exclusions_filepath = input_directory + 'CIO/#Holdings/Data/output/exclusions/ieq_exclusions_' + str(report_date.date()) + '.csv'
# End User Input Data
# Account Name to LGS Name dictionary
australian_equity_managers_dict = {
'LGS AUSTRALIAN EQUITIES - BLACKROCK': 'BlackRock',
'LGS AUSTRALIAN EQUITIES - ECP': 'ECP',
'LGS AUSTRALIAN EQUITIES DNR CAPITAL': 'DNR',
'LGS AUSTRALIAN EQUITIES - PENDAL': 'Pendal',
'LGS AUSTRALIAN EQUITIES - SSGA': 'SSGA',
'LGS AUSTRALIAN EQUITIES - UBIQUE': 'Ubique',
'LGS AUSTRALIAN EQUITIES - WSCF': 'First Sentier',
'LGS AUSTRALIAN EQUITIES REBALANCE': 'Rebalance',
'LGS AUST EQUITIES - ALPHINITY': 'Alphinity'
}
international_equity_managers_dict = {
'LGS INTERNATIONAL EQUITIES - WCM': 'WCM',
'LGS INTERNATIONAL EQUITIES - AQR': 'AQR',
'LGS INTERNATIONAL EQUITIES - HERMES': 'Hermes',
'LGS INTERNATIONAL EQUITIES - IMPAX': 'Impax',
'LGS INTERNATIONAL EQUITIES - LONGVI EW': 'Longview',
'LGS INTERNATIONAL EQUITIES - LSV': 'LSV',
'LGS INTERNATIONAL EQUITIES - MFS': 'MFS',
'LGS INTERNATIONAL EQUITIES - MACQUARIE': 'Macquarie',
'LGS INTERNATIONAL EQUITIES - WELLINGTON': 'Wellington',
'LGS GLOBAL LISTED PROPERTY - RESOLUTION': 'Resolution',
}
# Imports JPM Mandates holdings data
df_jpm = pd.read_csv(
jpm_filepath,
skiprows=[0, 1, 2, 3],
header=0,
usecols=[
'Account Number',
'Account Name',
'Security ID',
'ISIN',
'Security Name',
'Asset Type Description',
'Price Date',
'Market Price',
'Total Units',
'Total Market Value (Local)',
'Total Market Value (Base)',
'Local Currency'
],
parse_dates=['Price Date'],
infer_datetime_format=True
)
# Renames the columns into LGS column names
df_jpm = df_jpm.rename(
columns={
'Security ID': 'SEDOL',
'Asset Type Description': 'Asset Type',
'Price Date': 'Date',
'Market Price': 'Purchase Price Local',
'Total Units': 'Quantity',
'Total Market Value (Local)': 'Market Value Local',
'Total Market Value (Base)': 'Market Value AUD',
'Local Currency': 'Currency'
}
)
df_jpm['Purchase Price AUD'] = df_jpm['Market Value AUD'] / df_jpm['Quantity']
# Imports WSCF holdings data
df_wscf = pd.read_excel(
pd.ExcelFile(wscf_filepath),
sheet_name='Holdings',
skiprows=[0, 1, 2, 3, 4, 5, 6, 8],
header=0,
usecols=[
'Security SEDOL',
'Security ISIN',
'Security Name',
'Unit Holdings',
'Market Value (Local Currency)',
'Market Value (Base Currency)',
'Security Currency'
]
)
# Renames the columns into LGS column names
df_wscf = df_wscf.rename(
columns={
'Security SEDOL': 'SEDOL',
'Security ISIN': 'ISIN',
'Unit Holdings': 'Quantity',
'Market Value (Local Currency)': 'Market Value Local',
'Market Value (Base Currency)': 'Market Value AUD',
'Security Currency': 'Currency'
}
)
# Scales holdings by market value
wscf_scaling_factor = wscf_market_value/df_wscf['Market Value AUD'].sum()
df_wscf['Market Value Local'] = wscf_scaling_factor * df_wscf['Market Value Local']
df_wscf['Market Value AUD'] = wscf_scaling_factor * df_wscf['Market Value AUD']
df_wscf['Quantity'] = wscf_scaling_factor * df_wscf['Quantity']
df_wscf['Purchase Price Local'] = df_wscf['Market Value Local'] / df_wscf['Quantity']
df_wscf['Purchase Price AUD'] = df_wscf['Market Value AUD'] / df_wscf['Quantity']
df_wscf['Account Number'] = 'WSCF'
df_wscf['Account Name'] = 'LGS AUSTRALIAN EQUITIES - WSCF'
df_wscf['Date'] = report_date
df_wscf['Asset Type'] = np.nan
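# The same rescaling pattern recurs for each externally managed fund below; a
# reusable helper would look like this sketch (it is not called anywhere, and the
# explicit per-fund code is kept as-is):
def scale_holdings_to_market_value(df, target_market_value):
    """Scale market values and quantities so the AUD total matches the target."""
    factor = target_market_value / df['Market Value AUD'].sum()
    df['Market Value Local'] = factor * df['Market Value Local']
    df['Market Value AUD'] = factor * df['Market Value AUD']
    df['Quantity'] = factor * df['Quantity']
    return df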
# Imports AQR holdings data
df_aqr = pd.read_excel(
pd.ExcelFile(aqr_filepath),
sheet_name='Holdings',
skiprows=[0, 1, 2, 3, 4, 5, 6, 7],
header=0,
usecols=[
'Sedol',
'Isin',
'Investment Description',
'Asset Type',
'Price Local',
'Base Price',
'Quantity',
'MV Local',
'MV Base',
'Ccy'
]
)
# Renames the columns into LGS column names
df_aqr = df_aqr.rename(
columns={
'Sedol': 'SEDOL',
'Isin': 'ISIN',
'Investment Description': 'Security Name',
'Price Local': 'Purchase Price Local',
'Base Price': 'Purchase Price AUD',
'MV Local': 'Market Value Local',
'MV Base': 'Market Value AUD',
'Ccy': 'Currency'
}
)
# Scales holdings by market value
aqr_scaling_factor = aqr_market_value/df_aqr['Market Value AUD'].sum()
df_aqr['Market Value Local'] = aqr_scaling_factor * df_aqr['Market Value Local']
df_aqr['Market Value AUD'] = aqr_scaling_factor * df_aqr['Market Value AUD']
df_aqr['Quantity'] = aqr_scaling_factor * df_aqr['Quantity']
df_aqr['Account Number'] = 'AQR'
df_aqr['Account Name'] = 'LGS INTERNATIONAL EQUITIES - AQR'
df_aqr['Date'] = report_date
# Imports Delaware holdings data
df_delaware = pd.read_excel(
pd.ExcelFile(delaware_filepath),
sheet_name='EM SICAV holdings 7-31-2020',
header=0,
usecols=[
'Security SEDOL',
'Security ISIN',
'Security Description (Short)',
'Position Date',
'Shares/Par',
'Trading Currency',
'Traded Market Value (Local)',
'Traded Market Value (AUD)'
]
)
# Renames the columns into LGS column names
df_delaware = df_delaware.rename(
columns={
'Security SEDOL': 'SEDOL',
'Security ISIN': 'ISIN',
'Security Description (Short)': 'Security Name',
'Position Date': 'Date',
'Shares/Par': 'Quantity',
'Trading Currency': 'Currency',
'Traded Market Value (Local)': 'Market Value Local',
'Traded Market Value (AUD)': 'Market Value AUD'
}
)
# Scales holdings by market value
delaware_scaling_factor = delaware_market_value/df_delaware['Market Value AUD'].sum()
df_delaware['Market Value Local'] = delaware_scaling_factor * df_delaware['Market Value Local']
df_delaware['Market Value AUD'] = delaware_scaling_factor * df_delaware['Market Value AUD']
df_delaware['Quantity'] = delaware_scaling_factor * df_delaware['Quantity']
df_delaware['Purchase Price Local'] = df_delaware['Market Value Local'] / df_delaware['Quantity']
df_delaware['Purchase Price AUD'] = df_delaware['Market Value AUD'] / df_delaware['Quantity']
df_delaware['Account Number'] = 'MACQUARIE'
df_delaware['Account Name'] = 'LGS INTERNATIONAL EQUITIES - MACQUARIE'
df_delaware['Date'] = report_date
# Imports Wellington holdings data
df_wellington = pd.read_excel(
pd.ExcelFile(wellington_filepath),
sheet_name='wellington_holdings',
header=0,
usecols=[
'SEDOL',
'ISIN',
'Security',
'Shares or Par Value',
'ISO Code',
'Market Value (Local)',
'Market Value (Report Currency)'
]
)
# Renames the columns into LGS column names
df_wellington = df_wellington.rename(
columns={
'Security': 'Security Name',
'Shares or Par Value': 'Quantity',
'ISO Code': 'Currency',
'Market Value (Local)': 'Market Value Local',
'Market Value (Report Currency)': 'Market Value AUD'
}
)
# Scales holdings by market value
wellington_scaling_factor = wellington_market_value/df_wellington['Market Value AUD'].sum()
df_wellington['Market Value Local'] = wellington_scaling_factor * df_wellington['Market Value Local']
df_wellington['Market Value AUD'] = wellington_scaling_factor * df_wellington['Market Value AUD']
df_wellington['Quantity'] = wellington_scaling_factor * df_wellington['Quantity']
df_wellington['Purchase Price Local'] = df_wellington['Market Value Local'] / df_wellington['Quantity']
df_wellington['Purchase Price AUD'] = df_wellington['Market Value AUD'] / df_wellington['Quantity']
df_wellington['Account Number'] = 'WELLINGTON'
df_wellington['Account Name'] = 'LGS INTERNATIONAL EQUITIES - WELLINGTON'
df_wellington['Date'] = report_date
df_qic_cash = pd.read_excel(
pd.ExcelFile(qic_cash_filepath),
sheet_name='Risk and Exposure',
header=4,
usecols=[
'ISIN',
'Security Description',
'Security Type',
'Currency',
'Market Value %'
]
)
df_qic_cash = df_qic_cash.rename(
columns={
'Security Description': 'Security Name',
'Security Type': 'Asset Type'
}
)
df_qic_cash['Market Value Local'] = np.nan
df_qic_cash['Market Value AUD'] = df_qic_cash['Market Value %'] * qic_cash_market_value
df_qic_cash['Quantity'] = np.nan
df_qic_cash['Purchase Price Local'] = np.nan
df_qic_cash['Purchase Price AUD'] = np.nan
df_qic_cash['Account Number'] = 'QIC Cash'
df_qic_cash['Account Name'] = 'LGS CASH - QIC CASH'
df_qic_cash['Date'] = report_date
df_qic_cash = df_qic_cash.drop(columns=['Market Value %'], axis=1)
df_qic_cash = df_qic_cash[~df_qic_cash['Security Name'].isin([np.nan])].reset_index(drop=True)
# Joins the JPM and external manager holdings dataframes (QIC cash is kept separate)
df_main = pd.concat([df_jpm, df_wscf, df_aqr, df_delaware, df_wellington], axis=0, sort=True).reset_index(drop=True)
# Outputs all of the holdings
df_main_all = df_main.copy()
df_main_all = df_main_all.drop(['Date'], axis=1)
df_main_all.to_csv(output_directory + 'CIO/#Data/output/holdings/all_holdings.csv', index=False)
# <NAME> Spreadsheet
df_cp = df_main_all[['Account Name', 'Security Name', 'Market Value AUD']]
df_cp.to_csv(output_directory + 'CIO/#Data/output/holdings/craigpete.csv', index=False)
# Selects Australian Equity and International Equity managers only JANA
df_main_all_aeq = df_main_all[df_main_all['Account Name'].isin(australian_equity_managers_dict)].reset_index(drop=True)
df_main_all_ieq = df_main_all[df_main_all['Account Name'].isin(international_equity_managers_dict)].reset_index(drop=True)
# Writes to excel file for JANA
writer = pd.ExcelWriter(output_directory + 'CIO/#Data/output/holdings/jana/aeq_holdings.xlsx', engine='xlsxwriter')
account_to_dataframe_dict = dict(list(df_main_all_aeq.groupby('Account Name')))
for account, dataframe in account_to_dataframe_dict.items():
dataframe.to_excel(writer, sheet_name=australian_equity_managers_dict[account], index=False)
writer.save()
writer = pd.ExcelWriter(output_directory + 'CIO/#Data/output/holdings/jana/ieq_holdings.xlsx', engine='xlsxwriter')
account_to_dataframe_dict = dict(list(df_main_all_ieq.groupby('Account Name')))
for account, dataframe in account_to_dataframe_dict.items():
dataframe.to_excel(writer, sheet_name=international_equity_managers_dict[account], index=False)
writer.save()
# Starts top holdings section
# Removes SEDOLS with np.nan value
df_main_nan = df_main[df_main['SEDOL'].isin([np.nan])]
df_main = df_main[~df_main['SEDOL'].isin([np.nan])].reset_index(drop=True)
df_main = df_main[~df_main['ISIN'].isin([np.nan])].reset_index(drop=True)
# Cleans the SEDOL and ISIN strings
df_main['SEDOL'] = [str(df_main['SEDOL'][i]).replace(" ", "").upper() for i in range(0, len(df_main))]
df_main['ISIN'] = [str(df_main['ISIN'][i]).replace(" ", "").upper() for i in range(0, len(df_main))]
# Selects Australian Equity and International Equity managers only
df_main_aeq = df_main[df_main['Account Name'].isin(australian_equity_managers_dict)].reset_index(drop=True)
df_main_ieq = df_main[df_main['Account Name'].isin(international_equity_managers_dict)].reset_index(drop=True)
# Calculates % of portfolio within each asset class
df_main_aeq['(%) of Portfolio'] = (df_main_aeq['Market Value AUD'] / df_main_aeq['Market Value AUD'].sum()) * 100
df_main_ieq['(%) of Portfolio'] = (df_main_ieq['Market Value AUD'] / df_main_ieq['Market Value AUD'].sum()) * 100
# Sums all the security market values by their SEDOL
df_main_aeq = df_main_aeq.groupby(['SEDOL']).sum().sort_values(['Market Value AUD'], ascending=[False])[['Market Value AUD', '(%) of Portfolio']]
df_main_ieq = df_main_ieq.groupby(['SEDOL']).sum().sort_values(['Market Value AUD'], ascending=[False])[['Market Value AUD', '(%) of Portfolio']]
# Selects SEDOLS and Security names
df_security_names = df_main[['SEDOL', 'Security Name']].drop_duplicates(subset=['SEDOL'], keep='first').reset_index(drop=True)
# Merges security names back onto df_main_aeq
df_main_aeq = pd.merge(
left=df_main_aeq,
right=df_security_names,
left_on=['SEDOL'],
right_on=['SEDOL'],
how='outer',
indicator=True
)
df_main_aeq = df_main_aeq[df_main_aeq['_merge'].isin(['left_only', 'both'])].drop(columns=['_merge'], axis=1)
# Merges security names back onto df_main_ieq
df_main_ieq = pd.merge(
left=df_main_ieq,
right=df_security_names,
left_on=['SEDOL'],
right_on=['SEDOL'],
how='outer',
indicator=True
)
df_main_ieq = df_main_ieq[df_main_ieq['_merge'].isin(['left_only', 'both'])].drop(columns=['_merge'], axis=1)
# Remove AUD
df_main_ieq = df_main_ieq[~df_main_ieq['SEDOL'].isin(['--'])].reset_index(drop=True)
# Creates SEDOL to LGS friendly names dictionary for the top 10 holdings table for AE and IE.
sedol_to_common_name_dict = {
'6215035': 'CBA',
'6144690': 'BHP',
'6185495': 'CSL',
'6624608': 'NAB',
'6076146': 'Westpac',
'B28YTC2': 'Macquarie',
'6065586': 'ANZ',
'6087289': 'Telstra',
'6948836': 'Westfarmers',
'6220103': '<NAME>',
'6981239': 'Woolworths',
'BTN1Y11': 'Medtronic',
'B2PZN04': 'Visa',
'2661568': 'Oracle',
'2886907': '<NAME>',
'2842040': 'State Street',
'B4BNMY3': 'Accenture',
'2044545': 'Comcast',
'2270726': '<NAME>',
'BD6K457': 'Compass',
'2210959': 'Canadian Rail',
'7123870': 'Nestle',
'2588173': 'Microsoft',
'B4MGBG6': 'HCA',
'BMMV2K8': 'Tencent',
'2046251': 'Apple',
'6066608': 'Amcor',
'B44WZD7': 'Prologis',
'2000019': 'Amazon',
'--': 'AUD'
}
# Selects top 10 holdings for AE and IE
df_main_aeq_top10 = df_main_aeq.head(10)[['SEDOL', 'Market Value AUD', '(%) of Portfolio']]
df_main_ieq_top10 = df_main_ieq.head(10)[['SEDOL', 'Market Value AUD', '(%) of Portfolio']]
# Applies SEDOL to company name dictionary
df_main_aeq_top10['Company'] = [sedol_to_common_name_dict[df_main_aeq_top10['SEDOL'][i]] for i in range(0, len(df_main_aeq_top10))]
df_main_ieq_top10['Company'] = [sedol_to_common_name_dict[df_main_ieq_top10['SEDOL'][i]] for i in range(0, len(df_main_ieq_top10))]
# Divides market value by a million
df_main_aeq_top10['Market Value'] = df_main_aeq_top10['Market Value AUD'] / 1000000
df_main_ieq_top10['Market Value'] = df_main_ieq_top10['Market Value AUD'] / 1000000
# Selects columns for output into latex
df_main_aeq_top10 = df_main_aeq_top10[['Company', 'Market Value', '(%) of Portfolio']].round(2)
df_main_ieq_top10 = df_main_ieq_top10[['Company', 'Market Value', '(%) of Portfolio']].round(2)
# Outputs the tables into latex
with open(output_directory + 'CIO/#Data/output/investment/holdings/top10_local.tex', 'w') as tf:
    tf.write(df_main_aeq_top10.to_latex(index=False))
with open(output_directory + 'CIO/#Data/output/investment/holdings/top10_foreign.tex', 'w') as tf:
    tf.write(df_main_ieq_top10.to_latex(index=False))
# Writes to excel
writer = | pd.ExcelWriter(output_directory + 'CIO/#Data/output/holdings/top_holdings.xlsx', engine='xlsxwriter') | pandas.ExcelWriter |
import os
import csv
import pandas as pd
def main(dataset_path, dataset_mode):
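    """Pairs each Cityscapes-style leftImg8bit image in the given split with its
    gtFine label-ID map and stores the relative paths as 'image,label' strings."""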
    data_label_pairs = []
    data_path = 'leftImg8bit/'
    label_path = 'gtFine/'
    for subdirectory in os.listdir(dataset_path + '/' + data_path + dataset_mode):
        for image_path in os.listdir(dataset_path + '/' + data_path + dataset_mode + '/' + subdirectory):
            image_label_path = image_path.split('_')[0:-1]
            image_label_path.append('gtFine_labelIds.png')
            image_label_path = '_'.join(image_label_path)
            data_label_pairs.append(data_path + dataset_mode + '/' + subdirectory + '/' + image_path + ',' + label_path + dataset_mode + '/' + subdirectory + '/' + image_label_path)
    df = | pd.DataFrame(data_label_pairs) | pandas.DataFrame |
import pandas as pd
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Indicators")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Risk import *
from QuantConnect.Algorithm.Framework.Alphas import *
from QuantConnect.Algorithm.Framework.Execution import *
from QuantConnect.Algorithm.Framework.Portfolio import *
from QuantConnect.Algorithm.Framework.Selection import *
from QuantConnect.Data import SubscriptionDataSource
from QuantConnect.Python import PythonData
from datetime import timedelta, datetime, date
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import resample, shuffle
class RunConfig:
"""This class contains major parameters for running algorithm
"""
# Start date for back-testing
StartDate = date(2009, 5, 1)
# End date for backtesting
EndDate = date(2019, 12, 1)
# Initial Cash
StrategyCash = 200000
# Selection of long only (True) or long-short (False)
LongOnly = True
# Position holding period, days
PositionLifetime = timedelta(days=25)
# Vertical barrier, days (25 or 35 days for QC platform)
VertBarDays = 25
# For running on LEAN locally please provide a path to folder with data
PathToLocalFolder = ""
class QCTickDataStrategy(QCAlgorithm):
""" This algo implements RF triple barrier strategy based on raw tick data.
"""
def __init__(self):
# symbols of assets from MOEX
self.assets_keys = ['AFKS', 'ALRS', 'CHMF', 'GAZP',
'GMKN', 'LKOH', 'MGNT', 'MTSS',
'NVTK', 'ROSN', 'RTKM', 'SBER',
'SNGS', 'TATN', 'VTBR', 'YNDX']
# features to store in dataframe for ML
self.colsU = ['Logret', 'Momone', 'Momtwo', 'Momthree', 'Momfour', 'Momfive',
'Volatilityfifty', 'Volatilitythirtyone', 'Volatilityfifteen',
'Autocorrone', 'Autocorrtwo', 'Autocorrthree', 'Autocorrfour', 'Autocorrfive',
'Logtone', 'Logttwo', 'Logtthree', 'Logtfour', 'Logtfive',
'Bin', 'Side']
# dictionary to store custom asset objects
self.assets = {}
# dictionary to store pandas DataFrames with features for ML
self.features_dict = {}
# dictionary to store ML classifier (RandomForest)
self.clf_dict = {}
# dictionary to store end holding time for each position
self.stop_time_dict = {}
def Initialize(self):
# setting start and end date to run algorithm
self.SetStartDate(RunConfig.StartDate)
self.SetEndDate(RunConfig.EndDate)
# setting initial funds
self.SetCash(RunConfig.StrategyCash)
# creating custom assets from AdvancedBars class for each symbol
self.assets = {i: self.AddData(AdvancedBars, i) for i in self.assets_keys}
# creating empty dataframes for each symbol
self.features_dict = {i: | pd.DataFrame(columns=self.colsU) | pandas.DataFrame |
import pandas as pd
import os
from upload_dados import *
import plotly.express as px
import numpy as np
os.system('cls')
#4. What is the average booking lead time?
# filtering the data down to listings that were actually booked
data_df = data_df.loc[data_df['booked_on'] != 'blank']
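# (the pd.DataFrame() wrapper below is effectively a no-op; .loc already returns a DataFrame)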
data_df = pd.DataFrame(data_df)
# converting the date column to datetime
data_df['date'] = | pd.to_datetime(data_df['date']) | pandas.to_datetime |