input
stringlengths 2.65k
237k
| output
stringclasses 1
value |
---|---|
"unicode": "1f1ee-1f1e9"
},
":flag_ie:": {
"category": "flags",
"name": "ireland",
"unicode": "1f1ee-1f1ea"
},
":flag_il:": {
"category": "flags",
"name": "israel",
"unicode": "1f1ee-1f1f1"
},
":flag_im:": {
"category": "flags",
"name": "isle of man",
"unicode": "1f1ee-1f1f2"
},
":flag_in:": {
"category": "flags",
"name": "india",
"unicode": "1f1ee-1f1f3"
},
":flag_io:": {
"category": "flags",
"name": "british indian ocean territory",
"unicode": "1f1ee-1f1f4"
},
":flag_iq:": {
"category": "flags",
"name": "iraq",
"unicode": "1f1ee-1f1f6"
},
":flag_ir:": {
"category": "flags",
"name": "iran",
"unicode": "1f1ee-1f1f7"
},
":flag_is:": {
"category": "flags",
"name": "iceland",
"unicode": "1f1ee-1f1f8"
},
":flag_it:": {
"category": "flags",
"name": "italy",
"unicode": "1f1ee-1f1f9"
},
":flag_je:": {
"category": "flags",
"name": "jersey",
"unicode": "1f1ef-1f1ea"
},
":flag_jm:": {
"category": "flags",
"name": "jamaica",
"unicode": "1f1ef-1f1f2"
},
":flag_jo:": {
"category": "flags",
"name": "jordan",
"unicode": "1f1ef-1f1f4"
},
":flag_jp:": {
"category": "flags",
"name": "japan",
"unicode": "1f1ef-1f1f5"
},
":flag_ke:": {
"category": "flags",
"name": "kenya",
"unicode": "1f1f0-1f1ea"
},
":flag_kg:": {
"category": "flags",
"name": "kyrgyzstan",
"unicode": "1f1f0-1f1ec"
},
":flag_kh:": {
"category": "flags",
"name": "cambodia",
"unicode": "1f1f0-1f1ed"
},
":flag_ki:": {
"category": "flags",
"name": "kiribati",
"unicode": "1f1f0-1f1ee"
},
":flag_km:": {
"category": "flags",
"name": "the comoros",
"unicode": "1f1f0-1f1f2"
},
":flag_kn:": {
"category": "flags",
"name": "saint kitts and nevis",
"unicode": "1f1f0-1f1f3"
},
":flag_kp:": {
"category": "flags",
"name": "north korea",
"unicode": "1f1f0-1f1f5"
},
":flag_kr:": {
"category": "flags",
"name": "korea",
"unicode": "1f1f0-1f1f7"
},
":flag_kw:": {
"category": "flags",
"name": "kuwait",
"unicode": "1f1f0-1f1fc"
},
":flag_ky:": {
"category": "flags",
"name": "cayman islands",
"unicode": "1f1f0-1f1fe"
},
":flag_kz:": {
"category": "flags",
"name": "kazakhstan",
"unicode": "1f1f0-1f1ff"
},
":flag_la:": {
"category": "flags",
"name": "laos",
"unicode": "1f1f1-1f1e6"
},
":flag_lb:": {
"category": "flags",
"name": "lebanon",
"unicode": "1f1f1-1f1e7"
},
":flag_lc:": {
"category": "flags",
"name": "<NAME>",
"unicode": "1f1f1-1f1e8"
},
":flag_li:": {
"category": "flags",
"name": "liechtenstein",
"unicode": "1f1f1-1f1ee"
},
":flag_lk:": {
"category": "flags",
"name": "<NAME>",
"unicode": "1f1f1-1f1f0"
},
":flag_lr:": {
"category": "flags",
"name": "liberia",
"unicode": "1f1f1-1f1f7"
},
":flag_ls:": {
"category": "flags",
"name": "lesotho",
"unicode": "1f1f1-1f1f8"
},
":flag_lt:": {
"category": "flags",
"name": "lithuania",
"unicode": "1f1f1-1f1f9"
},
":flag_lu:": {
"category": "flags",
"name": "luxembourg",
"unicode": "1f1f1-1f1fa"
},
":flag_lv:": {
"category": "flags",
"name": "latvia",
"unicode": "1f1f1-1f1fb"
},
":flag_ly:": {
"category": "flags",
"name": "libya",
"unicode": "1f1f1-1f1fe"
},
":flag_ma:": {
"category": "flags",
"name": "morocco",
"unicode": "1f1f2-1f1e6"
},
":flag_mc:": {
"category": "flags",
"name": "monaco",
"unicode": "1f1f2-1f1e8"
},
":flag_md:": {
"category": "flags",
"name": "moldova",
"unicode": "1f1f2-1f1e9"
},
":flag_me:": {
"category": "flags",
"name": "montenegro",
"unicode": "1f1f2-1f1ea"
},
":flag_mf:": {
"category": "flags",
"name": "<NAME>",
"unicode": "1f1f2-1f1eb"
},
":flag_mg:": {
"category": "flags",
"name": "madagascar",
"unicode": "1f1f2-1f1ec"
},
":flag_mh:": {
"category": "flags",
"name": "the marshall islands",
"unicode": "1f1f2-1f1ed"
},
":flag_mk:": {
"category": "flags",
"name": "macedonia",
"unicode": "1f1f2-1f1f0"
},
":flag_ml:": {
"category": "flags",
"name": "mali",
"unicode": "1f1f2-1f1f1"
},
":flag_mm:": {
"category": "flags",
"name": "myanmar",
"unicode": "1f1f2-1f1f2"
},
":flag_mn:": {
"category": "flags",
"name": "mongolia",
"unicode": "1f1f2-1f1f3"
},
":flag_mo:": {
"category": "flags",
"name": "macau",
"unicode": "1f1f2-1f1f4"
},
":flag_mp:": {
"category": "flags",
"name": "northern mariana islands",
"unicode": "1f1f2-1f1f5"
},
":flag_mq:": {
"category": "flags",
"name": "martinique",
"unicode": "1f1f2-1f1f6"
},
":flag_mr:": {
"category": "flags",
"name": "mauritania",
"unicode": "1f1f2-1f1f7"
},
":flag_ms:": {
"category": "flags",
"name": "montserrat",
"unicode": "1f1f2-1f1f8"
},
":flag_mt:": {
"category": "flags",
"name": "malta",
"unicode": "1f1f2-1f1f9"
},
":flag_mu:": {
"category": "flags",
"name": "mauritius",
"unicode": "1f1f2-1f1fa"
},
":flag_mv:": {
"category": "flags",
"name": "maldives",
"unicode": "1f1f2-1f1fb"
},
":flag_mw:": {
"category": "flags",
"name": "malawi",
"unicode": "1f1f2-1f1fc"
},
":flag_mx:": {
"category": "flags",
"name": "mexico",
"unicode": "1f1f2-1f1fd"
},
":flag_my:": {
"category": "flags",
"name": "malaysia",
"unicode": "1f1f2-1f1fe"
},
":flag_mz:": {
"category": "flags",
"name": "mozambique",
"unicode": "1f1f2-1f1ff"
},
":flag_na:": {
"category": "flags",
"name": "namibia",
"unicode": "1f1f3-1f1e6"
},
":flag_nc:": {
"category": "flags",
"name": "new caledonia",
"unicode": "1f1f3-1f1e8"
},
":flag_ne:": {
"category": "flags",
"name": "niger",
"unicode": "1f1f3-1f1ea"
},
":flag_nf:": {
"category": "flags",
"name": "norfolk island",
"unicode": "1f1f3-1f1eb"
},
":flag_ng:": {
"category": "flags",
"name": "nigeria",
"unicode": "1f1f3-1f1ec"
},
":flag_ni:": {
"category": "flags",
"name": "nicaragua",
"unicode": "1f1f3-1f1ee"
},
":flag_nl:": {
"category": "flags",
"name": "the netherlands",
"unicode": "1f1f3-1f1f1"
},
":flag_no:": {
"category": "flags",
"name": "norway",
"unicode": "1f1f3-1f1f4"
},
":flag_np:": {
"category": "flags",
"name": "nepal",
"unicode": "1f1f3-1f1f5"
},
":flag_nr:": {
"category": "flags",
"name": "nauru",
"unicode": "1f1f3-1f1f7"
},
":flag_nu:": {
"category": "flags",
"name": "niue",
"unicode": "1f1f3-1f1fa"
},
":flag_nz:": {
"category": "flags",
"name": "new zealand",
"unicode": "1f1f3-1f1ff"
},
":flag_om:": {
"category": "flags",
"name": "oman",
"unicode": "1f1f4-1f1f2"
},
":flag_pa:": {
"category": "flags",
"name": "panama",
"unicode": "1f1f5-1f1e6"
},
":flag_pe:": {
"category": "flags",
"name": "peru",
"unicode": "1f1f5-1f1ea"
},
":flag_pf:": {
"category": "flags",
"name": "french polynesia",
"unicode": "1f1f5-1f1eb"
},
":flag_pg:": {
"category": "flags",
"name": "papua new guinea",
"unicode": "1f1f5-1f1ec"
},
":flag_ph:": {
"category": "flags",
"name": "the philippines",
"unicode": "1f1f5-1f1ed"
},
":flag_pk:": {
"category": "flags",
"name": "pakistan",
"unicode": "1f1f5-1f1f0"
},
":flag_pl:": {
"category": "flags",
"name": "poland",
"unicode": "1f1f5-1f1f1"
},
":flag_pm:": {
"category": "flags",
"name": "saint pierre and miquelon",
"unicode": "1f1f5-1f1f2"
},
":flag_pn:": {
"category": "flags",
"name": "pitcairn",
"unicode": "1f1f5-1f1f3"
},
":flag_pr:": {
"category": "flags",
"name": "puerto rico",
"unicode": "1f1f5-1f1f7"
},
":flag_ps:": {
"category": "flags",
"name": "palestinian authority",
"unicode": "1f1f5-1f1f8"
},
":flag_pt:": {
"category": "flags",
"name": "portugal",
"unicode": "1f1f5-1f1f9"
},
":flag_pw:": {
"category": "flags",
"name": "palau",
"unicode": "1f1f5-1f1fc"
},
":flag_py:": {
"category": "flags",
"name": "paraguay",
"unicode": "1f1f5-1f1fe"
},
":flag_qa:": {
"category": "flags",
"name": "qatar",
"unicode": "1f1f6-1f1e6"
},
":flag_re:": {
"category": "flags",
"name": "r\u00e9union",
"unicode": "1f1f7-1f1ea"
},
":flag_ro:": {
"category": "flags",
"name": "romania",
"unicode": "1f1f7-1f1f4"
},
":flag_rs:": {
"category": "flags",
"name": "serbia",
"unicode": "1f1f7-1f1f8"
},
":flag_ru:": {
"category": "flags",
"name": "russia",
"unicode": "1f1f7-1f1fa"
},
":flag_rw:": {
"category": "flags",
"name": "rwanda",
"unicode": "1f1f7-1f1fc"
},
":flag_sa:": {
"category": "flags",
"name": "saudi arabia",
"unicode": "1f1f8-1f1e6"
},
":flag_sb:": {
"category": "flags",
"name": "the solomon islands",
"unicode": "1f1f8-1f1e7"
},
":flag_sc:": {
"category": "flags",
"name": "the seychelles",
"unicode": "1f1f8-1f1e8"
},
":flag_sd:": {
"category": "flags",
"name": "sudan",
"unicode": "1f1f8-1f1e9"
},
":flag_se:": {
"category": "flags",
"name": "sweden",
"unicode": "1f1f8-1f1ea"
},
":flag_sg:": {
"category": "flags",
"name": "singapore",
"unicode": "1f1f8-1f1ec"
},
":flag_sh:": {
"category": "flags",
"name": "saint helena",
"unicode": "1f1f8-1f1ed"
},
":flag_si:": {
"category": "flags",
"name": "slovenia",
"unicode": "1f1f8-1f1ee"
},
":flag_sj:": {
"category": "flags",
"name": "svalbard and <NAME>",
"unicode": "1f1f8-1f1ef"
},
":flag_sk:": {
"category": "flags",
"name": "slovakia",
"unicode": "1f1f8-1f1f0"
},
":flag_sl:": {
"category": "flags",
"name": "<NAME>",
"unicode": "1f1f8-1f1f1"
},
":flag_sm:": {
"category": "flags",
"name": "<NAME>",
"unicode": "1f1f8-1f1f2"
},
":flag_sn:": {
"category": "flags",
"name": "senegal",
"unicode": "1f1f8-1f1f3"
},
":flag_so:": {
"category": "flags",
"name": "somalia",
"unicode": "1f1f8-1f1f4"
},
":flag_sr:": {
"category": "flags",
"name": "suriname",
"unicode": "1f1f8-1f1f7"
},
":flag_ss:": {
"category": "flags",
"name": "south sudan",
"unicode": "1f1f8-1f1f8"
},
":flag_st:": {
"category": "flags",
"name": "s\u00e3o tom\u00e9 and pr\u00edncipe",
"unicode": "1f1f8-1f1f9"
},
":flag_sv:": {
"category": "flags",
"name": "el salvador",
"unicode": "1f1f8-1f1fb"
},
":flag_sx:": {
"category": "flags",
"name": "<NAME>",
"unicode": "1f1f8-1f1fd"
},
":flag_sy:": {
"category": "flags",
"name": "syria",
"unicode": "1f1f8-1f1fe"
},
":flag_sz:": {
"category": "flags",
"name": "swaziland",
"unicode": "1f1f8-1f1ff"
},
":flag_ta:": {
"category": "flags",
"name": "tristan da cunha",
"unicode": "1f1f9-1f1e6"
},
":flag_tc:": {
"category": "flags",
"name": "turks and caicos islands",
"unicode": "1f1f9-1f1e8"
},
":flag_td:": {
"category": "flags",
"name": "chad",
"unicode": "1f1f9-1f1e9"
},
":flag_tf:": {
"category": "flags",
"name": "french southern territories",
"unicode": "1f1f9-1f1eb"
},
":flag_tg:": {
"category": "flags",
"name": "togo",
"unicode": "1f1f9-1f1ec"
},
":flag_th:": {
"category": "flags",
"name": "thailand",
"unicode": "1f1f9-1f1ed"
},
":flag_tj:": {
"category": "flags",
"name": "tajikistan",
"unicode": "1f1f9-1f1ef"
},
":flag_tk:": {
"category": "flags",
"name": "tokelau",
"unicode": "1f1f9-1f1f0"
},
":flag_tl:": {
"category": "flags",
"name": "timor-leste",
"unicode": "1f1f9-1f1f1"
},
":flag_tm:": {
"category": "flags",
"name": "turkmenistan",
"unicode": "1f1f9-1f1f2"
},
":flag_tn:": {
"category": "flags",
"name": "tunisia",
"unicode": "1f1f9-1f1f3"
},
":flag_to:": {
"category": "flags",
"name": "tonga",
"unicode": "1f1f9-1f1f4"
},
":flag_tr:": {
"category": "flags",
"name": "turkey",
"unicode": "1f1f9-1f1f7"
},
":flag_tt:": {
"category": "flags",
"name": "trinidad and tobago",
"unicode": "1f1f9-1f1f9"
},
":flag_tv:": {
"category": "flags",
"name": "tuvalu",
"unicode": "1f1f9-1f1fb"
},
":flag_tw:": {
"category": "flags",
"name": "the republic of china",
"unicode": "1f1f9-1f1fc"
},
":flag_tz:": {
"category": "flags",
"name": "tanzania",
"unicode": "1f1f9-1f1ff"
},
":flag_ua:": {
"category": "flags",
"name": "ukraine",
"unicode": "1f1fa-1f1e6"
},
":flag_ug:": {
"category": "flags",
"name": "uganda",
"unicode": "1f1fa-1f1ec"
},
":flag_um:": {
"category": "flags",
"name": "united states minor outlying islands",
"unicode": "1f1fa-1f1f2"
},
":flag_us:": {
"category": "flags",
"name": "united states",
"unicode": "1f1fa-1f1f8"
},
":flag_uy:": {
"category": "flags",
"name": "uruguay",
"unicode": "1f1fa-1f1fe"
},
":flag_uz:": {
"category": "flags",
"name": "uzbekistan",
"unicode": "1f1fa-1f1ff"
},
":flag_va:": {
"category": "flags",
"name": | |
<filename>examples/inducing_points/inducing_points.py
# -*- coding: utf-8 -*-
hlp = """
Comparison of the inducing point selection methods with varying noise rates
on a simple Gaussian Process signal.
"""
if __name__ == "__main__":
import matplotlib
matplotlib.use("Agg")
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import sys
import csv
import datetime
import os
import itertools as it
import time
import scipy
import numpy as np
import argparse
from scipy.stats import multivariate_normal as mvn, pearsonr, entropy
from mklaren.kernel.kernel import exponential_kernel, kernel_sum
from mklaren.kernel.kinterface import Kinterface
from mklaren.mkl.mklaren import Mklaren
from mklaren.regression.ridge import RidgeLowRank
from mklaren.regression.spgp import SPGP
from mklaren.projection.rff import RFF_KMP, RFF_TYP_NS, RFF_TYP_STAT
from mklaren.regression.ridge import RidgeMKL
from arima import Arima
import matplotlib.pyplot as plt
import pickle, gzip
# Hyperparameters
n_range = (100,) # Different numbers of data points
input_dim = 1 # Input dimension; Generating grid becames untracable for input_dim > ~4 ...
rank_range = (3, 5,) # Ranks
lbd_range = (0,) # Regularization hyperparameter
gamma_range = [0.1, 0.3, 1, 3] # Exponentiated-quadratic kernel hyperparameters
pc = 0.1 # Pseudocount; prevents inf in KL-divergence.
repeats = 500 # Sampling repeats to compare distributions
# Method print ordering
meth_order = ["Mklaren", "Arima", "CSI", "ICD", "Nystrom", "RFF", "RFF-NS", "SPGP", "True"]
# Color mappings
meth2color = {"Mklaren": "green",
"CSI": "red",
"ICD": "blue",
"Nystrom": "pink",
"SPGP": "orange",
"RFF": "magenta",
"RFF-NS": "purple",
"Arima": "black",
"True": "black",
"l2krr": "green",
"align": "pink",
"uniform": "blue",
"alignf": "red",
"alignfc": "orange"}
def generate_data(n, rank,
                  inducing_mode="uniform", noise=1, gamma_range=(0.1,), seed=None,
                  input_dim=1, signal_sampling="GP", data="mesh"):
    """
    Generate an artificial dataset with a given input dimension.
    :param n: Number of data points (per axis when data == "mesh", so the mesh
        has n ** input_dim rows in total).
    :param rank: Number of inducing points.
    :param inducing_mode: "uniform" or "biased" distribution of inducing points.
    :param noise: Noise variance.
    :param gamma_range: Number of kernels and their hyperparameters.
    :param seed: Random seed.
    :param input_dim: Input space dimension.
    :param signal_sampling: 'GP' or 'weights'. Weights is more efficient.
    :param data: "mesh" or "random".
    :return: (Ksum, Klist, inxs, X, Xp, y, f)
    """
    if seed is not None:
        np.random.seed(seed)

    # Generate data for arbitrary input_dim.
    if data == "mesh":
        x = np.linspace(-10, 10, n).reshape((n, 1))
        M = np.meshgrid(*(input_dim * [x]))
        # list(zip(...)) keeps this valid on Python 3, where zip is lazy and
        # np.array(zip(...)) would produce a useless 0-d object array.
        X = np.array(list(zip(*[m.ravel() for m in M])))
        N = X.shape[0]

        xp = np.linspace(-10, 10, 100).reshape((100, 1))
        Mp = np.meshgrid(*(input_dim * [xp]))
        Xp = np.array(list(zip(*[m.ravel() for m in Mp])))
    elif data == "random":
        # Ensure data is separated at proper lengthscales.
        ls = SPGP.gamma2lengthscale(min(gamma_range)) / np.sqrt(input_dim)
        a, b = -n * ls / 2.0, n * ls / 2.0
        X = a + 2 * b * np.random.rand(n, input_dim)
        N = X.shape[0]
        Xp = np.random.rand(100, input_dim)
    else:
        raise ValueError("Unknown data mode: %s" % data)

    # Sum-of-kernels interface over all lengthscales.
    Ksum = Kinterface(data=X, kernel=kernel_sum,
                      kernel_args={
                          "kernels": [exponential_kernel] * len(gamma_range),
                          "kernels_args": [{"gamma": g} for g in gamma_range]})

    # One kernel interface per lengthscale.
    Klist = [Kinterface(data=X, kernel=exponential_kernel, kernel_args={"gamma": g})
             for g in gamma_range]

    # Sample inducing point indices, either uniformly or biased toward
    # large-coordinate points.
    a = np.arange(X.shape[0], dtype=int)
    if inducing_mode == "uniform":
        p = None
    elif inducing_mode == "biased":
        af = np.sum(X + abs(X.min(axis=0)), axis=1)
        p = (af ** 2 / (af ** 2).sum())
    else:
        raise ValueError(inducing_mode)
    inxs = np.random.choice(a, p=p, size=rank, replace=False)

    if signal_sampling == "GP":
        # Nystrom-type reconstruction of the kernel from the inducing set.
        Kny = Ksum[:, inxs].dot(np.linalg.inv(Ksum[inxs, inxs])).dot(Ksum[inxs, :])
        f = mvn.rvs(mean=np.zeros((N,)), cov=Kny)
        y = mvn.rvs(mean=f, cov=noise * np.eye(N, N))
    elif signal_sampling == "weights":
        L = Ksum[:, inxs].dot(scipy.linalg.sqrtm(np.linalg.inv(Ksum[inxs, inxs])))
        w = mvn.rvs(mean=np.zeros(rank,), cov=np.eye(rank, rank)).ravel()
        f = L.dot(w)
        # Fix: use N (total number of rows in X) rather than n — for
        # input_dim > 1 the mesh has n ** input_dim rows and the original
        # rand(n, 1) failed to broadcast against f.
        # NOTE(review): this branch adds uniform noise while the "GP" branch
        # adds Gaussian noise — presumably intentional for speed; confirm.
        y = f + np.random.rand(N, 1).ravel() * noise
    else:
        raise ValueError(signal_sampling)
    return Ksum, Klist, inxs, X, Xp, y, f
def plot_signal(X, Xp, y, f, models=None, tit="", typ="plot_models", f_out = None):
    """
    Plot fitted signal.
    :param X: Sampling coordinates.
    :param Xp: Plotting (whole signal) coordinates.
    :param y: True observed values.
    :param f: True signal.
    :param models: One dictionary per model;
        "yp"       Predicted signal at Xp.
        "anchors"  Anchor (inducing point) coordinates, one set per lengthscale.
        "color"    Color.
        "label"    Name.
    :param tit: Plot title.
    :param typ: "plot_models" or "plot_gammas".
    :param f_out: Output file path; show on screen if None.
    :return:
    """
    # Fix: the original dereferenced models.values() before any None check,
    # so calling with the default models=None crashed. Treat None as empty.
    if models is None:
        models = {}

    # Plot signal
    plt.figure()
    x = X.ravel()
    xp = Xp.ravel()
    xmin, xmax = xp.min(), xp.max()
    ymin, ymax = int(min(f.min(), y.min())) - 1, int(max(f.max(), y.max())) + 1

    # Plot data: observations as black dots, true signal as red dashes.
    plt.plot(x, y, "k.")
    plt.plot(x, f, "r--")

    # Compute anchor ticks. A list comprehension (not a bare map) keeps this
    # valid on Python 3, where `[1] + map(...)` raises TypeError.
    P = max([1] + [len(m.get("anchors", [])) for m in models.values()])
    if typ == "plot_gammas":
        # int(...) — modern numpy rejects a float sample count in linspace.
        Gxs = [np.linspace(xmin, xmax, int(5 + 10 * g)) for g in np.logspace(-1, 1, P)]
    elif typ == "plot_models":
        Gxs = [np.linspace(xmin, xmax, 15) for g in np.logspace(-1, 1, len(models))]
    else:
        raise ValueError
    # One horizontal tick row per scale, stacked below the signal.
    Gys = range(ymin - len(Gxs), ymin)

    # Plot frequency scales
    for gi, (gx, gy) in enumerate(zip(Gxs, Gys)):
        plt.plot(gx, [gy] * len(gx), "|", color="gray")

    # Plot each model's predicted signal (the "True" entry has no prediction).
    for mi, (label, data) in enumerate(models.items()):
        if label == "True": continue
        yp = data.get("yp", np.zeros((len(X), )))
        color = meth2color[label]
        plt.plot(xp, yp, "-", color=color, label="%s" % label)

    # Plot anchors; sort so the "True" model is drawn last (on top).
    for mi, (label, data) in enumerate(sorted(models.items(), key=lambda lb: lb[0] == "True")):
        anchors = data.get("anchors", [[]])
        color = meth2color[label]
        if typ == "plot_gammas":  # Draw for different gammas
            for gi in range(P):
                if len(anchors) <= gi or not len(anchors[gi]): continue
                plt.plot(anchors[gi], [Gys[gi]] * len(anchors[gi]), "^",
                         color=color, markersize=8, alpha=0.6)
        elif typ == "plot_models":  # Draw for different methods
            gi = mi
            ancs = np.array(anchors).ravel()
            plt.text(xmin - 1, Gys[gi], "[%s]" % label, horizontalalignment="right",
                     verticalalignment="center", color=meth2color[label])
            plt.plot(ancs, [Gys[gi]] * len(ancs), "^",
                     color=color, markersize=8, alpha=0.6)

    plt.title(tit)
    plt.yticks(np.linspace(ymin, ymax, 2 * (ymax - ymin) + 1).astype(int))
    plt.ylim((ymin - len(Gys) - 1, ymax))
    plt.xlabel("Input space (x)")
    plt.ylabel("Output space (y)")
    plt.gca().yaxis.set_label_coords(-0.05, 0.75)
    if f_out is None:
        plt.show()
    else:
        plt.savefig(f_out)
        plt.close()
        print("Written %s" % f_out)
def plot_signal_subplots(X, Xp, y, f, models=None, f_out=None):
    """
    Plot fitted signal on multiple subplots (one per model) to avoid clutter.
    The models dictionary does not assume a 'True' entry.
    :param X: Sampling coordinates.
    :param Xp: Plotting (whole signal) coordinates.
    :param y: True observed values.
    :param f: True signal (may be None to skip plotting it).
    :param models: One dictionary per model;
        "yp"       Predicted signal at Xp.
        "anchors"  Anchor (inducing point) coordinates, one set per lengthscale.
        "color"    Color.
        "label"    Name.
    :param f_out: Output file. If not provided, show plot on screen.
    :return:
    """
    x = X.ravel()
    xp = Xp.ravel()
    xmin, xmax = min(0, xp.min()), xp.max()
    ymin, ymax = y.min(), y.max()
    nmods = len(models)
    fig, ax = plt.subplots(sharex=True, ncols=1, nrows=nmods, figsize=(4.33, nmods * 0.8))
    # Fix: with nrows=1, plt.subplots returns a bare Axes (not an array),
    # so ax[mi] below would fail for a single model. Normalize to 1-D.
    ax = np.atleast_1d(ax)
    # One subplot per model, ordered by the global meth_order ranking.
    for mi, (label, data) in enumerate(sorted(models.items(), key=lambda t: meth_order.index(t[0]))):
        lbl = label.replace("Nystrom", "Nyström")
        yp = data.get("yp", np.zeros((len(X),)))
        color = meth2color[label]

        # Plot to axis
        ax[mi].set_xlim(xmin, xmax)
        ax[mi].set_ylim(ymin, ymax)
        ax[mi].plot(x, y, ".", color="gray")
        if f is not None: ax[mi].plot(x, f, "r--")
        ax[mi].plot(xp, yp, "-", color=color, label="%s" % label, linewidth=1.5)

        # Plot anchors if provided, as triangles just above the lower edge.
        anchors = data.get("anchors", [[]])
        ancs = np.array(anchors).ravel()
        ax[mi].plot(ancs, [ymin + (ymax - ymin) * 0.05] * len(ancs),
                    "^", color=color, markersize=8, alpha=0.6)
        ax[mi].set_ylabel(lbl)
    ax[-1].set_xlabel("Input space (x)")
    fig.tight_layout()
    if f_out is None:
        plt.show()
    else:
        plt.savefig(f_out)
        plt.close()
        print("Written %s" % f_out)
        # Also persist the raw plotting data next to the figure.
        # "wb" is explicit binary mode for the pickled payload.
        f_out_gz = f_out + ".pkl.gz"
        obj = (X, Xp, y, f, models)
        pickle.dump(obj, gzip.open(f_out_gz, "wb"), protocol=pickle.HIGHEST_PROTOCOL)
        print("Written %s" % f_out_gz)
def test(Ksum, Klist, inxs, X, Xp, y, f, delta=10, lbd=0.1, kappa=0.99,
methods=("Mklaren", "ICD", "CSI", "Nystrom", "SPGP")):
"""
Sample data from a Gaussian process and compare fits with the sum of kernels
versus list of kernels.
:param Ksum:
:param Klist:
:param inxs:
:param X:
:param Xp:
:param y:
:param f:
:param delta:
:param lbd:
:param methods:
:return:
"""
def flatten(l):
return [item for sublist in l for item in sublist]
P = len(Klist) # Number of kernels
rank = len(inxs) # Total number of inducing points over all lengthscales
anchors = X[inxs,]
# True results
results = {"True": {"anchors": anchors,
"color": "black"}}
# Fit MKL for kernel sum and
if "Mklaren" in methods:
mkl = Mklaren(rank=rank,
delta=delta, lbd=lbd)
t1 = time.time()
mkl.fit(Klist, y)
t2 = time.time() - t1
y_Klist = mkl.predict([X] * len(Klist))
yp_Klist = mkl.predict([Xp] * len(Klist))
active_Klist = [flatten([mkl.data.get(gi, {}).get("act", []) for gi in range(P)])]
anchors_Klist = [X[ix] for ix in active_Klist]
try:
rho_Klist, _ = pearsonr(y_Klist, f)
except Exception as e:
rho_Klist = 0
evar = (np.var(y) - np.var(y - y_Klist)) / np.var(y)
results["Mklaren"] = {
"rho": rho_Klist,
"active": active_Klist,
"anchors": anchors_Klist,
"sol_path": mkl.sol_path,
"yp": yp_Klist,
"time": t2,
"evar": evar,
"model": mkl,
"color": meth2color["Mklaren"]}
# Fit CSI
if "CSI" in methods:
csi = RidgeLowRank(rank=rank, lbd=lbd,
method="csi", method_init_args={"delta": delta, "kappa": kappa},)
t1 = time.time()
csi.fit([Ksum], y)
t2 = time.time() - t1
y_csi = csi.predict([X])
yp_csi = csi.predict([Xp])
active_csi = csi.active_set_
anchors_csi = [X[ix] for | |
<filename>btb_manager_telegram/handlers.py<gh_stars>0
import json
import os
import shutil
import sqlite3
import subprocess
import sys
from configparser import ConfigParser
from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove, Update
from telegram.ext import (
CallbackContext,
CommandHandler,
ConversationHandler,
Filters,
MessageHandler,
)
from telegram.utils.helpers import escape_markdown
import i18n
from btb_manager_telegram import (
BOUGHT,
BUYING,
CUSTOM_SCRIPT,
DELETE_DB,
EDIT_COIN_LIST,
EDIT_USER_CONFIG,
MENU,
PANIC_BUTTON,
SELLING,
SOLD,
UPDATE_BTB,
UPDATE_TG,
buttons,
logger,
settings,
)
from btb_manager_telegram.binance_api_utils import send_signed_request
from btb_manager_telegram.utils import (
escape_tg,
find_and_kill_binance_trade_bot_process,
get_custom_scripts_keyboard,
i18n_format,
kill_btb_manager_telegram_process,
reply_text_escape,
telegram_text_truncator,
)
def menu(update: Update, _: CallbackContext) -> int:
    """
    Top-level conversation state: dispatch on the received command or pressed
    keyboard button, answer directly or hand off to another conversation
    state (returned as the handler's int result, MENU by default).
    """
    logger.info(f"Menu selector. ({update.message.text})")

    # Panic button disabled until PR #74 is complete
    # keyboard = [
    #     [i18n_format('keyboard.current_value'), i18n_format('keyboard.current_ratios')],
    #     [i18n_format('keyboard.progress'), i18n_format('keyboard.trade_history')],
    #     [i18n_format('keyboard.check_status'), i18n_format('keyboard.panic')],
    #     [i18n_format('keyboard.maintenance'), i18n_format('keyboard.configurations')],
    # ]

    # Main-menu keyboard layout (localized button labels).
    keyboard = [
        [i18n_format("keyboard.current_value"), i18n_format("keyboard.progress")],
        [i18n_format("keyboard.current_ratios"), i18n_format("keyboard.next_coin")],
        [i18n_format("keyboard.check_status"), i18n_format("keyboard.trade_history")],
        [i18n_format("keyboard.maintenance"), i18n_format("keyboard.configurations")],
    ]

    # Configurations sub-menu keyboard.
    config_keyboard = [
        [i18n_format("keyboard.start"), i18n_format("keyboard.stop")],
        [i18n_format("keyboard.read_logs"), i18n_format("keyboard.delete_db")],
        [i18n_format("keyboard.edit_cfg"), i18n_format("keyboard.edit_coin_list")],
        [i18n_format("keyboard.export_db"), i18n_format("keyboard.back")],
    ]

    # Maintenance sub-menu keyboard.
    maintenance_keyboard = [
        [i18n_format("keyboard.update_tgb")],
        [i18n_format("keyboard.update_btb")],
        [i18n_format("keyboard.execute_script")],
        [i18n_format("keyboard.back")],
    ]

    reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
    reply_markup_config = ReplyKeyboardMarkup(config_keyboard, resize_keyboard=True)
    reply_markup_maintenance = ReplyKeyboardMarkup(
        maintenance_keyboard, resize_keyboard=True
    )

    # modify reply_text function to have it escaping characters
    reply_text_escape_fun = reply_text_escape(update.message.reply_text)

    # Greeting on /start; sent via settings.CHAT rather than a reply.
    if update.message.text == "/start":
        logger.info("Started conversation.")
        message = (
            f"{i18n_format('conversation_started')}\n" f"{i18n_format('select_option')}"
        )
        settings.CHAT.send_message(
            escape_tg(message), reply_markup=reply_markup, parse_mode="MarkdownV2"
        )

    # NOTE(review): a separate `if` (not elif) — presumably so /start falls
    # through into the dispatch chain below; confirm intent.
    # Navigation: back to the main menu.
    if update.message.text in [
        i18n_format("keyboard.back"),
        i18n_format("keyboard.great"),
    ]:
        reply_text_escape_fun(
            i18n_format("select_option"),
            reply_markup=reply_markup,
            parse_mode="MarkdownV2",
        )

    # Navigation: into the configurations sub-menu.
    elif update.message.text in [
        i18n_format("keyboard.go_back"),
        i18n_format("keyboard.ok"),
        i18n_format("keyboard.configurations"),
    ]:
        reply_text_escape_fun(
            i18n_format("select_option"),
            reply_markup=reply_markup_config,
            parse_mode="MarkdownV2",
        )

    # Navigation: into the maintenance sub-menu.
    elif update.message.text in [
        i18n_format("keyboard.maintenance"),
        i18n_format("keyboard.cancel_update"),
        i18n_format("keyboard.cancel"),
        i18n_format("keyboard.ok_s"),
    ]:
        reply_text_escape_fun(
            i18n_format("select_option"),
            reply_markup=reply_markup_maintenance,
            parse_mode="MarkdownV2",
        )

    # Report: current portfolio value (may span multiple messages).
    elif update.message.text == i18n_format("keyboard.current_value"):
        for mes in buttons.current_value():
            reply_text_escape_fun(
                mes, reply_markup=reply_markup, parse_mode="MarkdownV2"
            )

    # Panic: offer a confirmation keyboard matched to the bot's trade status.
    elif update.message.text == i18n_format("keyboard.panic"):
        message, status = buttons.panic_btn()
        if status in [BOUGHT, BUYING, SOLD, SELLING]:
            if status == BOUGHT:
                kb = [
                    [i18n_format("keyboard.stop_sell")],
                    [i18n_format("keyboard.go_back")],
                ]
            elif status in [BUYING, SELLING]:
                kb = [
                    [i18n_format("keyboard.stop_cancel")],
                    [i18n_format("keyboard.go_back")],
                ]
            elif status == SOLD:
                kb = [[i18n_format("keyboard.stop")], [i18n_format("keyboard.go_back")]]
            reply_text_escape_fun(
                message,
                reply_markup=ReplyKeyboardMarkup(kb, resize_keyboard=True),
                parse_mode="MarkdownV2",
            )
            # Transition to the panic-confirmation conversation state.
            return PANIC_BUTTON
        else:
            reply_text_escape_fun(
                message, reply_markup=reply_markup_config, parse_mode="MarkdownV2"
            )

    # Report: trading progress.
    elif update.message.text == i18n_format("keyboard.progress"):
        for mes in buttons.check_progress():
            reply_text_escape_fun(
                mes, reply_markup=reply_markup, parse_mode="MarkdownV2"
            )

    # Report: current coin ratios.
    elif update.message.text == i18n_format("keyboard.current_ratios"):
        for mes in buttons.current_ratios():
            reply_text_escape_fun(
                mes, reply_markup=reply_markup, parse_mode="MarkdownV2"
            )

    # Report: next coin candidate.
    elif update.message.text == i18n_format("keyboard.next_coin"):
        for mes in buttons.next_coin():
            reply_text_escape_fun(
                mes, reply_markup=reply_markup, parse_mode="MarkdownV2"
            )

    # Report: whether the trade bot process is running.
    elif update.message.text == i18n_format("keyboard.check_status"):
        reply_text_escape_fun(
            buttons.check_status(), reply_markup=reply_markup, parse_mode="MarkdownV2"
        )

    # Report: trade history.
    elif update.message.text == i18n_format("keyboard.trade_history"):
        for mes in buttons.trade_history():
            reply_text_escape_fun(
                mes, reply_markup=reply_markup, parse_mode="MarkdownV2"
            )

    # Action: start the trade bot; buttons.start_bot() returns a status code
    # used to index the localized result messages below.
    elif update.message.text == i18n_format("keyboard.start"):
        logger.info("Start bot button pressed.")
        reply_text_escape_fun(
            i18n_format("btb.starting"),
            reply_markup=reply_markup_config,
            parse_mode="MarkdownV2",
        )
        status = buttons.start_bot()
        message = [
            i18n_format("btb.already_running"),
            i18n_format("btb.started"),
            i18n_format("btb.start_error"),
            f"{i18n_format('btb.installation_path_error', path=settings.ROOT_PATH)}\n{i18n_format('btb.directory_hint')}",
            f"{i18n_format('btb.lib_error', path=settings.PYTHON_PATH)}\n",
        ][status]
        reply_text_escape_fun(
            message,
            reply_markup=reply_markup_config,
            parse_mode="MarkdownV2",
        )

    # Action: stop the trade bot.
    elif update.message.text == i18n_format("keyboard.stop"):
        reply_text_escape_fun(
            buttons.stop_bot(),
            reply_markup=reply_markup_config,
            parse_mode="MarkdownV2",
        )

    # Report: tail of the bot log.
    elif update.message.text == i18n_format("keyboard.read_logs"):
        reply_text_escape_fun(
            buttons.read_log(),
            reply_markup=reply_markup_config,
            parse_mode="MarkdownV2",
        )

    # Action: delete the database — asks for confirmation first.
    elif update.message.text == i18n_format("keyboard.delete_db"):
        message, status = buttons.delete_db()
        if status:
            kb = [[i18n_format("keyboard.confirm"), i18n_format("keyboard.go_back")]]
            reply_text_escape_fun(
                message,
                reply_markup=ReplyKeyboardMarkup(kb, resize_keyboard=True),
                parse_mode="MarkdownV2",
            )
            return DELETE_DB
        else:
            reply_text_escape_fun(
                message, reply_markup=reply_markup_config, parse_mode="MarkdownV2"
            )

    # Action: edit user.cfg — switches to the free-text edit state on success.
    elif update.message.text == i18n_format("keyboard.edit_cfg"):
        message, status = buttons.edit_user_cfg()
        if status:
            reply_text_escape_fun(
                message, reply_markup=ReplyKeyboardRemove(), parse_mode="MarkdownV2"
            )
            return EDIT_USER_CONFIG
        else:
            reply_text_escape_fun(
                message, reply_markup=reply_markup_config, parse_mode="MarkdownV2"
            )

    # Action: edit the supported coin list — switches to the edit state.
    elif update.message.text == i18n_format("keyboard.edit_coin_list"):
        message, status = buttons.edit_coin()
        if status:
            reply_text_escape_fun(
                message, reply_markup=ReplyKeyboardRemove(), parse_mode="MarkdownV2"
            )
            return EDIT_COIN_LIST
        else:
            reply_text_escape_fun(
                message, reply_markup=reply_markup_config, parse_mode="MarkdownV2"
            )

    # Action: export the database file as a Telegram document.
    elif update.message.text == i18n_format("keyboard.export_db"):
        message, document = buttons.export_db()
        reply_text_escape_fun(
            message, reply_markup=reply_markup_config, parse_mode="MarkdownV2"
        )
        if document is not None:
            settings.CHAT.send_document(
                document=document,
                filename="crypto_trading.db",
            )

    # Action: update this Telegram bot — asks for confirmation first.
    elif update.message.text == i18n_format("keyboard.update_tgb"):
        message, status = buttons.update_tg_bot()
        if status:
            kb = [
                [i18n_format("keyboard.update"), i18n_format("keyboard.cancel_update")]
            ]
            reply_text_escape_fun(
                message,
                reply_markup=ReplyKeyboardMarkup(kb, resize_keyboard=True),
                parse_mode="MarkdownV2",
            )
            return UPDATE_TG
        else:
            reply_text_escape_fun(
                message,
                reply_markup=reply_markup_maintenance,
                parse_mode="MarkdownV2",
            )

    # Action: update the trade bot — asks for confirmation first.
    elif update.message.text == i18n_format("keyboard.update_btb"):
        message, status = buttons.update_btb()
        if status:
            kb = [
                [i18n_format("keyboard.update"), i18n_format("keyboard.cancel_update")]
            ]
            reply_text_escape_fun(
                message,
                reply_markup=ReplyKeyboardMarkup(kb, resize_keyboard=True),
                parse_mode="MarkdownV2",
            )
            return UPDATE_BTB
        else:
            reply_text_escape_fun(
                message,
                reply_markup=reply_markup_maintenance,
                parse_mode="MarkdownV2",
            )

    # Action: run a custom script — offers a keyboard of available scripts.
    elif update.message.text == i18n_format("keyboard.execute_script"):
        kb, status, message = get_custom_scripts_keyboard()
        if status:
            reply_text_escape_fun(
                message,
                reply_markup=ReplyKeyboardMarkup(kb, resize_keyboard=True),
                parse_mode="MarkdownV2",
            )
            return CUSTOM_SCRIPT
        else:
            reply_text_escape_fun(
                message,
                reply_markup=reply_markup_maintenance,
                parse_mode="MarkdownV2",
            )

    # Stay in the MENU state by default.
    return MENU
def edit_coin(update: Update, _: CallbackContext) -> int:
    """
    Conversation step that overwrites the supported coin list with the text
    the user just sent, backing up the previous file first.
    Sending "/stop" leaves the list untouched. Always returns to MENU.
    """
    logger.info(f"Editing coin list. ({update.message.text})")

    # reply_text variant that escapes MarkdownV2 special characters
    send_reply = reply_text_escape(update.message.reply_text)

    new_list = update.message.text
    if new_list == "/stop":
        # User aborted: report that nothing was changed.
        outcome = (
            f"{i18n_format('exited_no_change')}\n"
            f"{i18n_format('coin_list.not_modified')}"
        )
    else:
        # Optimistically build the success message, echoing the new list.
        outcome = (
            f"{i18n_format('coin_list.success')}\n\n"
            f"```\n"
            f"{new_list}\n"
            f"```"
        )
        target = os.path.join(settings.ROOT_PATH, "supported_coin_list")
        try:
            # Keep a backup of the previous list before overwriting it.
            shutil.copyfile(target, f"{target}.backup")
            with open(target, "w") as f:
                f.write(new_list + "\n")
        except Exception as e:
            logger.error(f"❌ Unable to edit coin list file: {e}", exc_info=True)
            outcome = i18n_format("coin_list.error")

    markup = ReplyKeyboardMarkup(
        [[i18n_format("keyboard.go_back")]], resize_keyboard=True
    )
    send_reply(outcome, reply_markup=markup, parse_mode="MarkdownV2")
    return MENU
def edit_user_config(update: Update, _: CallbackContext) -> int:
    """Handle the user's reply when editing the bot's user configuration.

    Unless the user sent ``/stop``, back up ``user.cfg`` and overwrite it
    with the message text. Always returns to the main menu.
    """
    logger.info(f"Editing user configuration. ({update.message.text})")
    # modify reply_text function to have it escaping characters
    reply_text_escape_fun = reply_text_escape(update.message.reply_text)
    if update.message.text != "/stop":
        message = (
            f"{i18n_format('config.success')}\n\n"
            f"```\n"
            f"{update.message.text}\n"
            f"```"
        )
        user_cfg_file_path = os.path.join(settings.ROOT_PATH, "user.cfg")
        try:
            # keep a backup so the previous configuration can be restored
            shutil.copyfile(user_cfg_file_path, f"{user_cfg_file_path}.backup")
            with open(user_cfg_file_path, "w") as f:
                f.write(update.message.text + "\n\n\n")
        except Exception as e:
            logger.error(
                f"❌ Unable to edit user configuration file: {e}", exc_info=True
            )
            message = i18n_format("config.error")
            try:
                # best effort: mirror the file mode onto the backup copy
                shutil.copymode(user_cfg_file_path, f"{user_cfg_file_path}.backup")
            except Exception:
                # was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit; narrowed to Exception
                pass
    else:
        message = (
            f"{i18n_format('exited_no_change')}\n"
            f"{i18n_format('config.not_modified')}"
        )
    keyboard = [[i18n_format("keyboard.go_back")]]
    reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
    reply_text_escape_fun(message, reply_markup=reply_markup, parse_mode="MarkdownV2")
    return MENU
def delete_db(update: Update, _: CallbackContext) -> int:
    """Delete the bot's database and paper wallet (after backing them up)
    and clear the log file, unless the user pressed the go-back button.
    """
    logger.info(
        f"Asking if the user really wants to delete the db. ({update.message.text})"
    )
    # reply_text variant that escapes MarkdownV2 special characters
    reply_text_escape_fun = reply_text_escape(update.message.reply_text)
    if update.message.text == i18n_format("keyboard.go_back"):
        message = (
            f"{i18n_format('exited_no_change')}\n"
            f"{i18n_format('db.delete.not_deleted')}"
        )
    else:
        message = i18n_format("db.delete.success")
        db_file_path = os.path.join(settings.ROOT_PATH, "data/crypto_trading.db")
        pw_file_path = os.path.join(settings.ROOT_PATH, "data/paper_wallet.json")
        log_file_path = os.path.join(settings.ROOT_PATH, "logs/crypto_trading.log")
        try:
            # back up before deleting so the data can be restored manually
            shutil.copyfile(db_file_path, f"{db_file_path}.backup")
            os.remove(db_file_path)
            if os.path.isfile(pw_file_path):
                shutil.copyfile(pw_file_path, f"{pw_file_path}.backup")
                os.remove(pw_file_path)
        except Exception as e:
            logger.error(f"❌ Unable to delete database file: {e}", exc_info=True)
            message = i18n_format("db.delete.error")
        try:
            with open(log_file_path, "w") as f:
                f.truncate()
        except Exception as e:
            logger.error(f"❌ Unable to clear log file: {e}", exc_info=True)
            message = i18n_format("db.delete.clear_log_error")
    reply_markup = ReplyKeyboardMarkup(
        [[i18n_format("keyboard.ok")]], resize_keyboard=True
    )
    reply_text_escape_fun(message, reply_markup=reply_markup, parse_mode="MarkdownV2")
    return MENU
def update_tg_bot(update: Update, _: CallbackContext) -> int:
    """Self-update BTB Manager Telegram via git + pip, relaunch it in the
    background and kill the current process, unless the user cancelled.
    """
    logger.info(f"Updating BTB Manager Telegram. ({update.message.text})")
    # reply_text variant that escapes MarkdownV2 special characters
    reply_text_escape_fun = reply_text_escape(update.message.reply_text)
    if update.message.text == i18n_format("keyboard.cancel_update"):
        message = (
            f"{i18n_format('exited_no_change')}\n"
            f"{i18n_format('update.tgb.not_updated')}"
        )
        reply_markup = ReplyKeyboardMarkup(
            [[i18n_format("keyboard.ok_s")]], resize_keyboard=True
        )
        reply_text_escape_fun(
            message, reply_markup=reply_markup, parse_mode="MarkdownV2"
        )
    else:
        reply_markup = ReplyKeyboardMarkup([["/start"]], resize_keyboard=True)
        reply_text_escape_fun(
            i18n_format("update.tgb.updating"),
            reply_markup=reply_markup,
            parse_mode="MarkdownV2",
        )
        try:
            # pull, upgrade dependencies, relaunch the manager in the
            # background, then kill the current process so the new one
            # takes over
            manager_python_path = sys.executable
            subprocess.call(
                f"git pull && {manager_python_path} -m pip install -r requirements.txt --upgrade && "
                f"{manager_python_path} -m btb_manager_telegram {settings.RAW_ARGS} &",
                shell=True,
            )
            kill_btb_manager_telegram_process()
        except Exception as e:
            logger.error(f"❌ Unable to update BTB Manager Telegram: {e}", exc_info=True)
            reply_text_escape_fun(
                i18n_format("update.tgb.error"),
                reply_markup=reply_markup,
                parse_mode="MarkdownV2",
            )
    return MENU
def update_btb(update: Update, _: CallbackContext) -> int:
    """Update Binance Trade Bot via git + pip after stopping the running
    bot, unless the user cancelled the update.
    """
    logger.info(f"Updating Binance Trade Bot. ({update.message.text})")
    # reply_text variant that escapes MarkdownV2 special characters
    reply_text_escape_fun = reply_text_escape(update.message.reply_text)
    reply_markup = ReplyKeyboardMarkup(
        [[i18n_format("keyboard.ok_s")]], resize_keyboard=True
    )
    if update.message.text == i18n_format("keyboard.cancel_update"):
        reply_text_escape_fun(
            f"{i18n_format('exited_no_change')}\n"
            f"{i18n_format('update.btb.not_updated')}",
            reply_markup=reply_markup,
            parse_mode="MarkdownV2",
        )
    else:
        reply_text_escape_fun(
            f"{i18n_format('update.btb.updating')}\n"
            f"{i18n_format('update.btb.start_manually')}",
            reply_markup=reply_markup,
            parse_mode="MarkdownV2",
        )
        try:
            # stop the running bot before pulling the new version
            find_and_kill_binance_trade_bot_process()
            subprocess.call(
                f"cd {settings.ROOT_PATH} && "
                f"git pull && "
                f"{settings.PYTHON_PATH} -m pip install -r requirements.txt --upgrade",
                shell=True,
            )
            settings.BTB_UPDATE_BROADCASTED_BEFORE = False
        except Exception as e:
            logger.error(f"Unable to update Binance Trade Bot: {e}", exc_info=True)
            reply_text_escape_fun(
                "Unable to update Binance Trade Bot",
                reply_markup=reply_markup,
                parse_mode="MarkdownV2",
            )
    return MENU
def panic(update: Update, _: CallbackContext) -> int:
    """Emergency stop: kill the trade bot and, depending on the pressed
    button, market-sell the current pair and/or cancel open orders via the
    Binance REST API. Returns to the main menu.
    """
    logger.info(f"Panic Button is doing its job. ({update.message.text})")
    # modify reply_text function to have it escaping characters
    reply_text_escape_fun = reply_text_escape(update.message.reply_text)
    keyboard = [[i18n_format("keyboard.great")]]
    reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
    if update.message.text != i18n_format("keyboard.go_back"):
        find_and_kill_binance_trade_bot_process()
        # Get current coin pair
        db_file_path = os.path.join(settings.ROOT_PATH, "data/crypto_trading.db")
        # NOTE(review): this sqlite connection is never closed — consider
        # con.close() (or a context manager) after fetching the pair.
        con = sqlite3.connect(db_file_path)
        cur = con.cursor()
        # Get last trade
        cur.execute(
            """SELECT alt_coin_id, crypto_coin_id FROM trade_history ORDER BY datetime DESC LIMIT 1;"""
        )
        alt_coin_id, crypto_coin_id = cur.fetchone()
        # Get Binance api keys and tld
        user_cfg_file_path = os.path.join(settings.ROOT_PATH, "user.cfg")
        with open(user_cfg_file_path) as cfg:
            config = ConfigParser()
            config.read_file(cfg)
            api_key = config.get("binance_user_config", "api_key")
            api_secret_key = config.get("binance_user_config", "api_secret_key")
            tld = config.get("binance_user_config", "tld")
        # NOTE(review): the three `!=` checks below mean that any button OTHER
        # than the named one triggers the action (e.g. any text other than
        # "stop_sell" performs a market sell) — confirm this is intended and
        # not a `==` typo.
        if update.message.text != i18n_format("keyboard.stop_sell"):
            params = {
                "symbol": f"{alt_coin_id}{crypto_coin_id}",
                "side": "SELL",
                "type": "MARKET",
            }
            message = send_signed_request(
                api_key,
                api_secret_key,
                f"https://api.binance.{tld}",
                "POST",
                "/api/v3/order",
                payload=params,
            )
        if update.message.text != i18n_format("keyboard.stop_cancel"):
            params = {"symbol": f"{alt_coin_id}{crypto_coin_id}"}
            message = send_signed_request(
                api_key,
                api_secret_key,
                f"https://api.binance.{tld}",
                "DELETE",
                "/api/v3/openOrders",
                payload=params,
            )
        if update.message.text != i18n_format("keyboard.stop_bot"):
            message = i18n_format("killed_bot")
    else:
        # user backed out
        # NOTE(review): reuses the 'update.btb.not_updated' i18n key — looks
        # copy-pasted from update_btb; confirm a panic-specific key exists.
        message = (
            f"{i18n_format('exited_no_change')}\n"
            f"{i18n_format('update.btb.not_updated')}"
        )
    reply_text_escape_fun(message, reply_markup=reply_markup, parse_mode="MarkdownV2")
    return MENU
def execute_custom_script(update: Update, _: CallbackContext) -> int:
logger.info(f"Going to 🤖 execute custom script. ({update.message.text})")
# modify reply_text function to have it escaping characters
reply_text_escape_fun = reply_text_escape(update.message.reply_text)
keyboard = [[i18n_format("keyboard.ok_s")]]
reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
custom_scripts_path = "./config/custom_scripts.json"
| |
<gh_stars>0
#!/usr/bin/env python3
import time as timer
import sys
import logging
from collections import deque
from angr.exploration_techniques import ExplorationTechnique
import psutil
class ToolChainExplorer(ExplorationTechnique):
"""
TODO
"""
def __init__(
self,
simgr,
max_length,
exp_dir,
nameFileShort,
worker
):
#TODO refactor
super(ToolChainExplorer, self).__init__()
self._max_length = max_length
self.worker = worker
self.timeout = worker.timeout
self.jump_it = worker.jump_it
self.timeout_tab = worker.timeout_tab
self.start_time = timer.time()
self.log = logging.getLogger("ToolChainExplorer")
self.log.setLevel("INFO")
self.max_end_state = worker.max_end_state
self.errored = 0
self.unconstrained = 0
self.deadended = 0
self.active = 1
self.id = 0
self.snapshot_state = {}
self.fork_stack = deque()
self.pause_stash = simgr.stashes["pause"]
self.exp_dir = exp_dir
self.nameFileShort = nameFileShort
self.eval_time = worker.eval_time
self.time_id = 0
self.print_sm_step = True
self.loopBreak_stack = deque()
self.jump_concrete_dict = worker.jump_concrete_dict
self.jump_dict = worker.jump_dict
self.jump_dict[0] = {}
self.jump_concrete_dict[0] = {}
self.loop_counter_concrete = worker.loop_counter_concrete
self.max_step = worker.max_step
self.max_simul_state = worker.max_simul_state
self.max_in_pause_stach = worker.max_in_pause_stach
self.scdg = worker.scdg
self.scdg_fin = [] # TODO from main
self.dict_addr_vis = {}
self.print_on = worker.print_on
self.print_sm_step = worker.print_sm_step
self.print_syscall = worker.print_syscall
self.debug_error = worker.debug_error
self.loopBreak_stack = deque()
self.call_sim = worker.call_sim
self.expl_method = "DFS"
self.memory_limit = worker.memory_limit
    def _filter(self, s):
        # Accept every state; subclasses can narrow the filtering.
        return True
    def check_constraint(self, state, value):
        """Concretize `value` with the state's solver; return the original
        (still symbolic) value when no single concrete solution exists.
        """
        try:
            val = state.solver.eval_one(value)
            is_sao = hasattr(val, "to_claripy")
            # NOTE(review): `val` is the result of eval_one, so this branch
            # looks dead — the hasattr check was presumably meant for `value`;
            # confirm before changing.
            if is_sao:
                val = val.to_claripy()
        except Exception:
            if self.print_on:
                self.log.info("Symbolic value encountered !")
            return value
        return val
def __proper_formating(self, state, value):
"""
Take a state and a value (argument/return value) and return an appropriate reprensentation to use in SCDG.
"""
if hasattr(value, "to_claripy"):
value = value.to_claripy()
if hasattr(value, "symbolic") and value.symbolic and hasattr(value, "name"):
# self.log.info("case 1 formating")
return value.name
elif (
hasattr(value, "symbolic") and value.symbolic and len(value.variables) == 1
):
# import pdb; pdb.set_trace()
# self.log.info("case 2 formating")
# self.log.info(value.variables)
return list(value.variables)[0]
elif hasattr(value, "symbolic") and value.symbolic:
# self.log.info('case 3 : multiple variables involved')
# TODO improve this
ret = "_".join(list(value.variables))
return ret
else:
# self.log.info("case 4 formating")
try:
val = state.solver.eval_one(value)
return val
except:
return value
    def take_smallest(self, simgr, source_stash):
        """
        Take a state of source_stash with smallest amount of steps and append it to active stash
        @pre : source_stash exists
        """
        id_to_move = 0
        min_step = 2000  # initial sentinel; overwritten when the stash is non-empty
        if len(simgr.stashes[source_stash]) > 0:
            id_to_move = simgr.stashes[source_stash][0].globals["id"]
            min_step = simgr.stashes[source_stash][0].globals["n_steps"]
        else:
            return
        for s in simgr.stashes[source_stash]:
            # also prefer a state whose jump target has not been visited yet,
            # as long as it is not deeper than the current minimum
            if s.globals["n_steps"] < min_step or (
                str(self.check_constraint(s, s.history.jump_target))
                not in self.dict_addr_vis
                and s.globals["n_steps"] <= min_step
            ):
                id_to_move = s.globals["id"]
                min_step = s.globals["n_steps"]
        simgr.move(source_stash, "active", lambda s: s.globals["id"] == id_to_move)
def take_longuest(self, simgr, source_stash):
"""
Take a state of source_stash with longuest amount of steps and append it to active stash
@pre : source_stash exists
"""
id_to_move = 0
max_step = 0
if len(simgr.stashes[source_stash]) > 0:
id_to_move = simgr.stashes[source_stash][0].globals["id"]
max_step = simgr.stashes[source_stash][0].globals["n_steps"]
else:
return
for s in simgr.stashes[source_stash]:
if s.globals["n_steps"] > max_step:
id_to_move = s.globals["id"]
max_step = s.globals["n_steps"]
simgr.move(source_stash, "active", lambda s: s.globals["id"] == id_to_move)
def __take_custom(self, simgr, source_stash, moves):
"""
Take a state of source_stash with smallest amount of steps and append it to active stash
@pre : source_stash exists
"""
id_to_move = 0
if len(simgr.stashes[source_stash]) == 0:
return
for s in simgr.stashes[source_stash]:
if (
str(self.check_constraint(s, s.history.jump_target))
not in self.dict_addr_vis
):
id_to_move = s.globals["id"]
simgr.move(
source_stash, "active", lambda s: s.globals["id"] == id_to_move
)
# self.log.info('optimization for exploration used')
return
self.take_smallest(simgr, source_stash)
def __take_custom_deep(self, simgr, source_stash):
id_to_move = 0
if len(simgr.stashes[source_stash]) == 0:
return
for s in simgr.stashes[source_stash]:
if (
str(self.check_constraint(s, s.history.jump_target))
not in self.dict_addr_vis
):
id_to_move = s.globals["id"]
simgr.move(
source_stash, "active", lambda s: s.globals["id"] == id_to_move
)
# self.log.info('optimization for exploration used')
return
self.take_longuest(simgr, source_stash)
def __change_main_state(self, simgr, source_stash):
"""
Take a state of source_stash and append it to active stash
@pre : source_stash exists
"""
if len(simgr.stashes[source_stash]) > 0:
simgr.stashes["active"].append(simgr.stashes[source_stash].pop())
    def mv_bad_active(self, simgr):
        """
        Take simulation manager and discard states that :
        - Exceed max number of step
        - Execute too many times a simple loop
        """
        # Discard Loop without symbolic variable which takes too much time
        for state in simgr.active:
            # key identifying the (target, source) pair of the last jump
            test = str(state.history.jump_target) + "-" + str(state.history.jump_source)
            if test in self.jump_concrete_dict[state.globals["id"]]:
                self.jump_concrete_dict[state.globals["id"]][test] += 1
            else:
                self.jump_concrete_dict[state.globals["id"]][test] = 1
            if (
                self.jump_concrete_dict[state.globals["id"]][test]
                > self.loop_counter_concrete
            ):
                # import pdb; pdb.set_trace()
                # state.history.trim()
                # NOTE(review): simgr.move mutates simgr.active while this loop
                # iterates over it — confirm the stash semantics tolerate this.
                simgr.move(
                    from_stash="active",
                    to_stash="ExcessLoop",
                    filter_func=lambda s: s.globals["id"] == state.globals["id"],
                )
                self.log.info("A state has been discarded because of simple loop")
            if state.globals["n_steps"] % 1000 == 0:
                self.log.debug("n_steps = " + str(state.globals["n_steps"]))
            if state.globals["n_steps"] > self.max_step:
                # import pdb; pdb.set_trace()
                state.history.trim()
                simgr.move(
                    from_stash="active",
                    to_stash="ExcessStep",
                    filter_func=lambda s: s.globals["id"] == state.globals["id"],
                )
                self.log.info("A state has been discarded because of max_step reached")
    def __mv_new_addr_state(self, simgr):
        """
        Check new_addr stash and update it correctly
        """
        for s in simgr.stashes["new_addr"]:
            if (
                str(self.check_constraint(s, s.history.jump_target))
                in self.dict_addr_vis
            ):
                id_to_move = s.globals["id"]
                simgr.move("new_addr", "pause", lambda s: s.globals["id"] == id_to_move)
                # self.log.info('optimization for exploration used')
                # NOTE(review): returns after re-filing the first matching
                # state, so at most one state moves per call — confirm intended.
                return
    def __update_id_stash(self, simgr, id, new_id):
        """
        Inspect active stash
        Update two ids that are the same to new_id
        Return states have this initial id
        """
        found = False
        was_excess = False
        first_state = None
        for state in simgr.active:
            if state.globals["id"] == id:
                # Case 1 : First state of stash could be a JumpExcedeed, second is not
                if found and not state.globals["JumpExcedeed"]:
                    if was_excess:
                        state.globals["id"] = new_id
                        return first_state, state
                    return state, first_state
                # Case 2 : First state of stash could not be a JumpExcedeed, second is !
                elif found and state.globals["JumpExcedeed"]:
                    return state, first_state
                # Case 3 : First state of stash IS a jumpExcedeed !
                elif not found and state.globals["JumpExcedeed"]:
                    found = True
                    was_excess = True
                    first_state = state
                # Case 4 : First state of stash IS NOT a jumpExcedeed !
                else:
                    found = True
                    state.globals["id"] = new_id
                    first_state = state
        # Was a 'fake' fork
        # NOTE(review): if no active state carries `id`, first_state is None
        # and the line below raises AttributeError — confirm callers guarantee
        # at least one match.
        first_state.globals["id"] = id
# Break at specific instruction and open debug mode.
def __debug_instr(self, state):
if state.inspect.instruction == int(
"0x0040123f", 16
) or state.inspect.instruction == int("0x0040126e", 16):
self.log.info("Debug function\n\n")
self.log.info(hex(state.inspect.instruction))
import pdb
pdb.set_trace()
def __debug_read(self, state):
if state.solver.eval(state.inspect.mem_read_address) == int("0xf404120", 16):
self.log.info("Read function\n\n")
self.log.info(state.inspect.mem_read_address)
import pdb
pdb.set_trace()
def __debug_write(self, state):
if state.solver.eval(state.inspect.mem_write_address) == int("0xf404120", 16):
self.log.info("Write function\n\n")
self.log.info(state.inspect.mem_write_address)
import pdb
pdb.set_trace()
def __add_addr_call(self, state):
test = state.globals["addr_call"] + [state.scratch.ins_addr]
state.globals["addr_call"] = test
def __rm_addr_call(self, state):
calls = state.globals["addr_call"]
if len(calls) > 1:
state.globals["addr_call"] = calls[1:]
    def step(self, simgr, stash="active", **kwargs):
        # Intentionally a no-op here; concrete exploration strategies are
        # expected to override this stepping hook.
        pass
def build_snapshot(self, simgr):
self.snapshot_state.clear()
for state in simgr.active:
if state.globals["id"] in self.snapshot_state:
self.fork_stack.append(state.globals["id"])
self.snapshot_state[state.globals["id"]] += 1
else:
self.snapshot_state[state.globals["id"]] = 1
state.globals["n_steps"] += 1
def manage_unconstrained(self, simgr):
if len(simgr.unconstrained) > self.unconstrained:
new_unconstrained = len(simgr.unconstrained) - self.unconstrained
for i in range(new_unconstrained):
id_cur = simgr.unconstrained[-1].globals["id"]
self.log.info(
"End of the trace number " + str(id_cur) + " unconstrained"
)
self.unconstrained = len(simgr.unconstrained)
def manage_error(self, simgr):
if len(simgr.errored) > self.errored:
new_errors = len(simgr.errored) - self.errored
self.log.info(simgr.errored)
for i in range(new_errors):
id_cur = simgr.errored[-i - 1].state.globals["id"]
self.log.info("End of the trace number " + str(id_cur) + " with errors")
simgr.errored[-i - 1]
if self.debug_error:
# import pdb
# pdb.set_trace()
# last_error.debug()
pass
self.errored = len(simgr.errored)
def drop_excessed_loop(self, simgr):
excess_loop = len(simgr.stashes["ExcessLoop"]) - (self.max_in_pause_stach / 5)
excess_loop = int(excess_loop) # TODO chris check how we round (up-down)
if excess_loop > 0:
id_to_stash = []
# print(excess_loop)
state_to_stash = simgr.stashes["ExcessLoop"][-excess_loop:]
for t in state_to_stash:
id_to_stash.append(t.globals["id"])
simgr.drop(
filter_func=lambda s: s.globals["id"] in id_to_stash, stash="ExcessLoop"
)
def excessed_step_to_active(self, simgr):
if len(simgr.active) == 0 and len(simgr.stashes["ExcessStep"]) > 0:
moves = min(len(simgr.stashes["ExcessStep"]), self.max_simul_state)
id_move = []
for i in range(moves):
state = simgr.stashes["ExcessStep"][i]
self.id = state.globals["id"]
id_move.append(self.id)
state.globals["n_steps"] = 0
simgr.move(
from_stash="ExcessStep",
to_stash="active",
filter_func=lambda s: s.globals["id"] in id_move,
)
def excessed_loop_to_active(self, simgr):
if len(simgr.active) == 0 and len(simgr.stashes["ExcessLoop"]) > 0:
moves = min(len(simgr.stashes["ExcessLoop"]), self.max_simul_state)
id_move = []
for i in range(moves):
state = simgr.stashes["ExcessLoop"][i]
self.id = state.globals["id"]
id_move.append(self.id)
state.globals["JumpExcedeed"] = False
self.jump_dict[self.id].clear()
self.jump_concrete_dict[self.id].clear()
simgr.move(
from_stash="ExcessLoop",
to_stash="active",
filter_func=lambda s: s.globals["id"] in id_move,
)
def manage_pause(self, simgr):
# If too many states are explored simulateously, move some of them to pause stash.
if len(simgr.active) > self.max_simul_state:
excess = len(simgr.active) - self.max_simul_state
state_to_stash = simgr.active[-excess:]
id_to_stash = []
for t in state_to_stash:
id_to_stash.append(t.globals["id"])
simgr.move(
from_stash="active",
to_stash="pause",
filter_func=lambda s: s.globals["id"] in id_to_stash,
)
# If there is too much states in pause stash, discard some | |
<filename>dataloader.py
# coding:utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import cPickle
import h5py
import os, time, pdb
import numpy as np
import random
import torch
import torch.utils.data as data
import multiprocessing
import pandas as pd
class DataLoader(data.Dataset):
def reset_iterator(self, split):
del self._prefetch_process[split]
self._prefetch_process[split] = BlobFetcher(split,
self, (split == 'train') and (self.opt.shuffle))
self.iterators[split] = 0
    def get_vocab_size(self):
        """Return the number of words in the caption vocabulary."""
        return self.vocab_size
    def get_dataset_dize(self, mode):
        """Return the number of videos in the given split ('train'/'val'/'test').

        NOTE(review): name looks like a typo for get_dataset_size; kept
        because external callers may depend on it.
        """
        return len(self.split_ix[mode])
    def get_vocab(self):
        """Return the index-to-word mapping loaded from the info file."""
        return self.ix_to_word
    def get_seq_length(self):
        """Return the maximum caption sequence length in the label file."""
        return self.seq_length
def get_other_feats(self, other_features):
other_feats = {'lda': None}
if 'lda' in other_features:
lda_file = h5py.File(self.opt.input_lda_path, 'r')
lda_data = {vid: lda_file[vid].value for vid in lda_file.keys()}
lda_file.close()
other_feats['lda'] = lda_data
return other_feats
def get_c3d_feature(self, video_id):
feature = np.load(os.path.join(self.input_c3d_dir2, video_id + '.npy')).astype('float32')
mean = -0.001915027447565527
var = 1.9239444588254049
feature = (feature - mean) / np.sqrt(var)
att_feature = np.zeros((1, 1, 1)).astype('float32')
return feature, att_feature
    def get_twostream_feature(self, video_id):
        """Load and fuse spatial + optical-flow two-stream features for a video.

        Returns a (frames, 400) array (zeros matching the c3d length when the
        spatial CSV is missing) and a (1, 1, 1) placeholder attention feature.
        """
        path = os.path.join(self.opt.input_twostream_dir, 'spatial', 'csv_action', video_id + '.csv')
        if not os.path.exists(path):
            # no spatial stream available: fall back to zeros, sized from c3d
            vid_len = np.load(os.path.join(self.input_c3d_dir2, video_id + '.npy')).astype('float32').shape[0]
            att_feature = np.zeros((1, 1, 1)).astype('float32')
            return np.zeros((vid_len, 400)), att_feature
        spatial = pd.read_csv(path)
        OF = pd.read_csv(os.path.join(self.opt.input_twostream_dir, 'OF', 'csv_action', video_id + '.csv'))
        # truncate both streams to the shorter one before concatenation
        if spatial.shape[0] >= OF.shape[0]:
            vid_len = OF.shape[0]
        else:
            vid_len = spatial.shape[0]
        feature = np.concatenate((spatial[:vid_len], OF[:vid_len]),1)
        att_feature = np.zeros((1, 1, 1)).astype('float32')
        return feature,att_feature
    def get_data(self, ix):
        """Assemble all per-video tensors and labels for dataset index `ix`.

        Returns a list: [feature, lda_feat, att_feature, tap_labels, tap_masks,
        iou_scores, gts_index, tap_masks_good_proposal, train_only, w1, ix, other].
        """
        video_id = self.info['videos'][ix]['video_id']
        # feature = np.array(self.feats_c3d[video_id]['c3d_features']).astype('float32')
        features, att_features = [], []
        if vars(self.opt).get('use_c3d_feature',True):
            feature1, att_feature1 = self.get_c3d_feature(video_id)
            features.append(feature1)
            att_features.append(att_feature1)
        if vars(self.opt).get('use_2stream_feature',False):
            feature2, att_feature2 = self.get_twostream_feature(video_id)
            # stride 2: halve the two-stream frame rate before fusing
            feature2 = feature2[::2]
            att_feature2 = att_feature2[::2]
            features.append(feature2)
            att_features.append(att_feature2)
        # truncate every stream to the shortest one, then fuse channel-wise
        vid_len = 1e10
        for f in features:
            vid_len = f.shape[0] if f.shape[0] < vid_len else vid_len
        features = [f[:vid_len] for f in features]
        feature = np.concatenate(features, 1).astype('float32')
        att_feature = np.concatenate(att_features, 1).astype('float32')
        iou_scores, tap_masks, gts_index, gt_featstamps, tap_other = self.get_vid_data(video_id, feature.shape[0])
        if self.use_SOTA_tep:
            SOTA_featstamps, SOTA_Prop_score, SOTA_timestamps = self.get_SOTA_TEP_label(video_id, feature.shape[0])
        else:
            SOTA_featstamps = SOTA_Prop_score = SOTA_timestamps = None
        w1 = np.array(self.w1).astype('float32')
        # binarize proposal labels at the configured IoU thresholds
        tap_labels = (iou_scores >= self.opt.iou_threshold)
        tap_masks_good_proposal = (iou_scores >= self.opt.iou_threshold_for_good_proposal)  # * tap_masks
        lda_feat = np.array(self.other_feats['lda'][video_id]).astype('float32') if self.opt.use_lda else np.array(
            [0])
        other = {}
        train_only = {}
        other['gt_featstamps'] = gt_featstamps
        other['SOTA_featstamps'] = SOTA_featstamps
        other['SOTA_timestamps'] = SOTA_timestamps
        other['SOTA_Prop_score'] = SOTA_Prop_score
        # if ix < self.train_length: # if ix is in training set
        if True:
            # good proposals carry the (0-based) index of their ground truth;
            # non-proposals become -1
            tap_gts_for_good_proposal = (tap_masks_good_proposal * (gts_index + 1) - 1).astype('int')
            proposal_num = (tap_gts_for_good_proposal >= 0).sum()
            # assert ncap == tap_gts_for_good_proposal.max() + 1
            other['tap_gts_for_good_proposal'] = tap_gts_for_good_proposal
            if self.opt.tap_model == "sst_1stage" and proposal_num > 0:
                tap_list, lm_list, soi_list, sampled_ids, action_label = self.get_shuffle_list(tap_gts_for_good_proposal,gt_featstamps,
                                                                                              method='1stage')
                other['action_label'] = action_label
            else:
                tap_list, lm_list, soi_list, sampled_ids = self.get_shuffle_list(tap_gts_for_good_proposal,gt_featstamps,
                                                                                method='random')
            train_only['ind_select_list'] = np.array(tap_list[sampled_ids]).astype('int')  # sampled
            train_only['ind_select_list_eval'] = np.array(tap_list).astype('int')  # sampled
            train_only['cg_select_list'] = np.array(lm_list[sampled_ids]).astype('int')  # sampled
            train_only['soi_select_list'] = np.array(soi_list[sampled_ids]).astype('int')  # sampled
            train_only['soi_select_list_eval'] = np.array(soi_list).astype('int')  # sampled
            train_only['sampled_ids'] = np.array(sampled_ids).astype('int')
        return [feature,
                lda_feat,
                att_feature,
                tap_labels,
                tap_masks,
                iou_scores,
                gts_index,
                tap_masks_good_proposal,
                train_only,
                # tap_good_proposal_info,
                w1,
                ix,
                other]
    def __init__(self, opt):
        """Build the dense-captioning dataset: load vocab, caption labels,
        split indexes, and start one BlobFetcher prefetch process per split.
        """
        # initial some variables
        self.opt = opt
        self.batch_size = self.opt.batch_size
        self.use_att = getattr(opt, 'use_att', False)
        self.iou_threshold = self.opt.iou_threshold
        self.iou_threshold_good = self.opt.iou_threshold_for_good_proposal
        # self.label_file_for_tap = self.opt.label_file_for_tap
        self.input_c3d_dir2 = opt.input_c3d_dir2
        with open(self.opt.w1_json) as f:
            self.w1 = json.load(f)
        with open(self.opt.video_json) as f:
            self.data = json.load(f)
        # optional external (SOTA) temporal-event-proposal results
        self.use_SOTA_tep = vars(self.opt).get('SOTA_json', None)
        if self.use_SOTA_tep:
            with open(self.opt.SOTA_json) as f:
                self.SOTA_TEP_Poporal = json.load(f)['results']
        self.K = self.opt.K
        self.prop_sample_num = opt.prop_sample_num
        # load json file which contains additional information about dataset
        print('DataLoader loading features file: ', opt.input_c3d_dir2)
        print('DataLoader loading train label file: ', opt.train_label_for_cg)
        print('DataLoader loading val label file: ', opt.val_label_for_cg)
        with open(self.opt.video_data_for_cg) as f:
            self.info = json.load(f)
        print('DataLoader loading video_data_information file: ', opt.video_data_for_cg)
        self.ix_to_word = self.info['ix_to_word']
        self.vocab_size = len(self.ix_to_word)
        print('vocab size is ', self.vocab_size)
        # open the label file
        train_label_h5 = h5py.File(self.opt.train_label_for_cg, 'r', driver='core')
        self.train_label_file = {key: train_label_h5[key].value for
                                 key in train_label_h5.keys()}
        train_label_h5.close()
        val_label_h5 = h5py.File(self.opt.val_label_for_cg, 'r', )
        self.val_label_file = {key: val_label_h5[key].value for key in
                               val_label_h5.keys()}
        val_label_h5.close()
        if vars(self.opt).get('other_features', 0) != 0:
            self.other_feats = self.get_other_feats(self.opt.other_features)
        seq_size = self.train_label_file['labels'].shape
        self.seq_length = seq_size[1]
        print('max sequence length in data is', self.seq_length)
        # load the index of sentences for all videos
        # end_ix - start_ix is the number of senteces for a video
        self.train_label_start_ix = self.train_label_file['label_start_ix'][:]
        self.train_label_end_ix = self.train_label_file['label_end_ix'][:]
        self.val_label_start_ix = self.val_label_file['label_start_ix'][:]
        self.val_label_end_ix = self.val_label_file['label_end_ix'][:]
        self.val_videos = self.val_label_start_ix.shape[0]
        self.train_videos = self.train_label_start_ix.shape[0]
        print('there are %d videos to be trained' % (self.train_videos))
        print("there are %d videos in validation " % (self.val_videos))
        self.split_ix = {'train': [], 'val': [], 'test': []}
        # separate out indexes for each of the provided splits
        for ix in range(len(self.info['videos'])):
            # if ix % 10 != 0:
            # continue
            video = self.info['videos'][ix]
            if video['split'] == 'train':
                self.split_ix['train'].append(ix)
            elif video['split'] == 'val':
                self.split_ix['val'].append(ix)
            elif video['split'] == 'test':
                self.split_ix['test'].append(ix)
            elif opt.train_only == 0:  # restval
                self.split_ix['train'].append(ix)
        print('assigned %d videos to split train' % len(self.split_ix['train']))
        print('assigned %d videos to split val' % len(self.split_ix['val']))
        print('assigned %d videos to split test' % len(self.split_ix['test']))
        self.train_length = self.train_videos
        self.val_length = self.val_videos
        # self.test_length = len(self.split_ix['test'])
        self.iterators = {'train': 0, 'val': 0, 'test': 0}
        self._prefetch_process = {}  # The three prefetch process
        for split in self.iterators.keys():
            self._prefetch_process[split] = BlobFetcher(split,
                                                        self, (split == 'train') and (opt.shuffle))
            # BlobFetcher(train,self,train)
        # Terminate the child process when the parent exists
        def cleanup():
            print('Terminating BlobFetcher')
            for split in self.iterators.keys():
                del self._prefetch_process[split]
        import atexit
        atexit.register(cleanup)
# calculate the iou value
def iou(self, interval, featstamps, return_index=False):
start_i, end_i = interval[0], interval[1]
output = 0.0
gt_index = -1
for i, (start, end) in enumerate(featstamps):
start = start - 0.01
end = end + 0.01
intersection = max(0, min(end, end_i) - max(start, start_i))
union = min(max(end, end_i) - min(start, start_i), end - start + end_i - start_i)
overlap = float(intersection) / (union + 1e-8)
if overlap >= output:
output = overlap
gt_index = i
if return_index:
return output, gt_index
return output
def event_distance(self, featstamps1, featstamp2):
s1, e1 = featstamps1
s2, e2 = featstamp2
intersection = max(0, min(e1, e2) - max(s1, s2))
union = min(max(e1, e2) - min(s1, s2), e1 - s1 + e2 - s2)
d = float(intersection) / (e1 - s1) + float(intersection) / (e2 - s2)
return d
# calculat the features for each gt proposal
def timestamp_to_featstamp(self, timestamp, nfeats, duration):
start, end = timestamp
start = max(min(int(round(start / duration * nfeats)), nfeats - 2), 0)
end = min(max(int(round(end / duration * nfeats)), start + 1), nfeats - 1)
return start, end
def featstamp_to_time(self, start_f, end_f, nfeats, duration):
time_per_feat = duration / nfeats
start = min(max(0, start_f * time_per_feat), duration - time_per_feat)
end = max(end_f * time_per_feat, start + time_per_feat)
return start, end
def get_SOTA_TEP_label(self, video_id, nfeats):
duration = self.data[video_id]['duration']
others = {}
SOTA_featstamps = None
SOTA_Prop_score = None
SOTA_timestamps = None
if video_id[2:] in self.SOTA_TEP_Poporal.keys():
SOTA_timestamps = [event['segment'] for event in self.SOTA_TEP_Poporal[video_id[2:]]]
SOTA_featstamps = [self.timestamp_to_featstamp(x, nfeats, duration) for x in SOTA_timestamps]
SOTA_Prop_score = [event['score'] for event in self.SOTA_TEP_Poporal[video_id[2:]]]
# others['SOTA_featstamps'] = SOTA_featstamps
# others['SOTA_Prop_score'] = SOTA_Prop_score
return SOTA_featstamps, SOTA_Prop_score, SOTA_timestamps
    def get_vid_data(self, video_id, nfeats):
        """Compute per-timestep IoU labels against the ground-truth events.

        Returns (iou_scores, tap_masks, gts_index, featstamps, others) where
        iou_scores[t, k] is the IoU of the anchor [t - k - 1, t] with its best
        ground-truth event and gts_index[t, k] is that event's index.
        """
        # feats = features[video_id]["c3d_features"]
        duration = self.data[video_id]['duration']
        timestamps = self.data[video_id]['timestamps']
        featstamps = [self.timestamp_to_featstamp(x, nfeats, duration) for x in timestamps]
        SOTA_featstamps = None
        SOTA_Prop_score = None
        if self.use_SOTA_tep:
            if video_id[2:] in self.SOTA_TEP_Poporal.keys():
                SOTA_timestamps = [event['segment'] for event in self.SOTA_TEP_Poporal[video_id[2:]]]
                SOTA_featstamps = [self.timestamp_to_featstamp(x, nfeats, duration) for x in SOTA_timestamps]
                SOTA_Prop_score = [event['score'] for event in self.SOTA_TEP_Poporal[video_id[2:]]]
        time_per_feat = duration / nfeats
        nb_prop = len(featstamps)
        # K anchor lengths per timestep; S_* are the dense (start, end) variants
        iou_scores = np.zeros([nfeats, self.K], dtype='float32')
        gts_index = np.zeros([nfeats, self.K], dtype='float32')
        S_iou_scores = np.zeros([nfeats, nfeats], dtype='float32')
        # gt_captured = []
        tap_masks = np.zeros([nfeats, self.K], dtype='float32')
        S_tap_masks = np.zeros([nfeats, nfeats], dtype='float32')
        # anchors are only valid once at least k + 1 timesteps have elapsed
        for index in range(nfeats):
            tap_masks[index, :min(self.K, index)] = 1
        # NOTE(review): `xrange` makes this Python-2-only (consistent with the
        # cPickle import at module level); use range if porting to Python 3.
        for t in range(nfeats):
            for k in xrange(self.K):
                if t >= k + 1:
                    iou, gt_index = self.iou([t - k - 1, t], featstamps, return_index=True)
                    iou_scores[t, k] = iou
                    gts_index[t, k] = gt_index
                    S_iou_scores[t - k - 1, t] = iou
                    S_tap_masks[t - k - 1, t] = 1
        others = {}
        others['S_iou_scores'] = S_iou_scores
        others['S_tap_masks'] = S_tap_masks
        others['SOTA_featstamps'] = SOTA_featstamps
        others['SOTA_Prop_score'] = SOTA_Prop_score
        return iou_scores, tap_masks, gts_index, featstamps, others
def get_batch(self, split, batch_size=None):
batch_size = batch_size or self.batch_size
wrapped = False
infos = []
prop_captured = []
data = {}
for i in range(batch_size):
# fetch videos,labels,temp_att and some other information
tmp_c3d, tmp_lda, tmp_att, tap_label, tap_masks, iou_scores, gts_index, tap_masks_good_proposal, train_only, w1, | |
resources[index + 1], resources[index]
self.collection.set_dirty(True)
indexes = [index + 1 for index in indexes]
self.update_table(table, resources, indexes)
self.update_ui()
message = "Resource moved" if len(indexes) == 1 else "Resources moved"
self.statusBar().showMessage(message, 5000)
def edit_move_left(self):
    """Swap the active tab with its left neighbour and keep it focused."""
    current = self.central_widget.currentIndex()
    left = current - 1
    self.collection[left], self.collection[current] = (
        self.collection[current],
        self.collection[left],
    )
    self.collection.set_dirty(True)
    self.update_widget()
    self.central_widget.setCurrentIndex(left)
    self.statusBar().showMessage("Tab moved", 5000)
def edit_move_right(self):
    """Swap the active tab with its right neighbour and keep it focused."""
    current = self.central_widget.currentIndex()
    right = current + 1
    self.collection[right], self.collection[current] = (
        self.collection[current],
        self.collection[right],
    )
    self.collection.set_dirty(True)
    self.update_widget()
    self.central_widget.setCurrentIndex(right)
    self.statusBar().showMessage("Tab moved", 5000)
def edit_move_up(self):
    """Move the selected resources up one line.

    Swaps each selected row with the one above it. Rows are processed in
    ascending order so earlier swaps do not disturb the positions of rows
    still to be moved.
    """
    table = self.central_widget.currentWidget()
    table_index = self.central_widget.currentIndex()
    resources = self.collection[table_index]
    indexes = sorted([selected.row() for selected in table.selectionModel().selectedRows()])
    for index in indexes:
        resources[index - 1], resources[index] = resources[index], resources[index - 1]
    self.collection.set_dirty(True)
    # shift the selection indexes so the moved rows stay selected
    indexes = [index - 1 for index in indexes]
    self.update_table(table, resources, indexes)
    self.update_ui()
    message = "Resource moved" if len(indexes) == 1 else "Resources moved"
    self.statusBar().showMessage(message, 5000)
def edit_paste(self):
    """Paste the content of the clipboard to the resources.

    Each clipboard line becomes one resource, inserted after the current
    row. A line with a tab is treated as "alias<TAB>file"; a single-field
    line is treated as a file path. A "file:///" URL is made relative to
    the collection file's directory by slicing off the scheme plus the
    directory prefix.
    """
    table_index = self.central_widget.currentIndex()
    resources = self.collection[table_index]
    new_resources = QApplication.clipboard().text().strip().split("\n")
    indexes = []
    # insert just below the currently selected row
    row = self.central_widget.currentWidget().currentRow() + 1
    for data in new_resources:
        data = data.split("\t")
        if len(data) == 1:
            if data[0].startswith("file:///"):
                # strip "file:///" + the collection's directory to get a relative path
                file = data[0][len("file:///") + len(os.path.dirname(self.collection.file_name())):]
            else:
                file = data[0]
            resource = qrcdata.Resource(file)
        else:
            # clipboard columns are "alias<TAB>file"; Resource takes (file, alias)
            resource = qrcdata.Resource(data[1], data[0])
        resources.insert(row, resource)
        indexes.append(row)
        row += 1
    self.update_table(self.central_widget.currentWidget(), self.collection[table_index], indexes)
    self.collection.set_dirty(True)
    self.update_ui()
    self.statusBar().showMessage("Clipboard pasted", 5000)
def edit_remove_resource(self):
    """Remove the selected resources from the current tab.

    Rows are popped in descending index order so earlier removals do not
    shift the indexes of rows still to be removed.
    """
    table = self.central_widget.currentWidget()
    table_index = self.central_widget.currentIndex()
    resources = self.collection[table_index]
    indexes = sorted([selected.row() for selected in table.selectionModel().selectedRows()], reverse=True)
    message = "Resources removed" if len(indexes) > 1 else "Resource removed"
    for index in indexes:
        resources.pop(index)
    self.collection.set_dirty(True)
    self.update_table(table, resources)
    self.update_ui()
    self.statusBar().showMessage(message, 5000)
def edit_remove_tab(self, index=-1):
    """Remove a tab after user confirmation.

    Parameters:
        index (int): the index of the tab to close; the current tab is
            closed when index is -1 (or any negative value).
    """
    if index >= 0:
        # focus the requested tab so the confirmation refers to it
        self.central_widget.setCurrentIndex(index)
    reply = QMessageBox.question(self, "QRC Editor - Remove Tab", "Remove the tab and all its resources?",
                                 QMessageBox.Yes | QMessageBox.No)
    if reply == QMessageBox.Yes:
        self.collection.pop(self.central_widget.currentIndex())
        self.collection.set_dirty(True)
        self.update_widget()
        self.statusBar().showMessage("Tab removed", 5000)
def edit_settings(self):
    """Open the settings dialog; report on the status bar when accepted."""
    settings_dialog = qrcdlg.ResourceSettingsDlg(self.options, self)
    if settings_dialog.exec_():
        self.statusBar().showMessage("Settings updated", 5000)
def edit_sort(self):
    """Open the sort dialog and sort the current tab's resources.

    Sort key is (alias, file) or (file, alias) depending on the dialog's
    combo box; the previous selection is re-applied by locating each
    previously selected resource in the sorted list.
    """
    dialog = qrcdlg.TabSortDlg(self)
    if dialog.exec_():
        table = self.central_widget.currentWidget()
        table_index = self.central_widget.currentIndex()
        resources = self.collection[table_index]
        # remember which resource objects are selected before sorting
        indexes = [selected.row() for selected in table.selectionModel().selectedRows()]
        selected_resources = [resources[index] for index in indexes]
        if dialog.key_combo_box.currentIndex() == 0:
            resources.sort(key=lambda resource: [resource.alias(), resource.file()],
                           reverse=dialog.reverse_checkbox.isChecked())
        else:
            resources.sort(key=lambda resource: [resource.file(), resource.alias()],
                           reverse=dialog.reverse_checkbox.isChecked())
        self.collection.set_dirty(True)
        # map the remembered resources to their new positions
        indexes = [resources.index(resource) for resource in selected_resources]
        self.update_table(table, resources, indexes)
        self.update_ui()
        self.statusBar().showMessage("Table updated", 5000)
def edit_update(self):
    """Rebuild the current tab's table from its resource list."""
    current_table = self.central_widget.currentWidget()
    tab_index = self.central_widget.currentIndex()
    tab_resources = self.collection[tab_index]
    self.update_table(current_table, tab_resources, current_table.currentRow())
    self.update_ui()
    self.statusBar().showMessage("Table updated", 5000)
def file_compile(self):
    """Compile a resource collection to a .py file.

    Builds a pyside2-rcc command line from self.options and runs it with
    subprocess.run; reports success on the status bar or shows a critical
    message box on failure.
    """
    if not self.ok_to_continue():
        return
    # default output name: same path with .qrc replaced by .py
    file_name = self.collection.file_name()[:-4] + ".py"
    file_name, _ = QFileDialog.getSaveFileName(self, "QRC Editor - Compile Resource Collection File",
                                               file_name, "Python file (*.py)")
    if file_name:
        options = [self.options["program"], "-o", file_name]
        if self.options["no_compress"]:
            options.append("-no-compress")
        if self.options["compress"]:
            options.extend(["-compress", "{0}".format(self.options["compress_level"])])
        if self.options["threshold"]:
            options.extend(["-threshold", "{0}".format(self.options["threshold_level"])])
        options.append(self.collection.file_name())
        completed = None
        try:
            # check=True raises CalledProcessError on a non-zero exit code
            completed = subprocess.run(options, check=True)
        except (IOError, OSError, subprocess.CalledProcessError) as err:
            QMessageBox.critical(self, "Compile Error", "There was an error during the process: {0}".format(err))
        if completed and completed.returncode == 0:
            self.statusBar().showMessage("{0} successfully compiled".format(os.path.basename(file_name)), 5000)
def file_new(self):
    """Create a new resource collection file.

    If the current editor still holds a pristine unnamed collection it is
    reused; otherwise a new editor window is opened for the chosen file.
    """
    file_name, _ = QFileDialog.getSaveFileName(self, "QRC Editor - Save Resource Collection File",
                                               ".", "Resource Collection file (*.qrc)")
    if file_name:
        if file_name[-4:].lower() != ".qrc":
            file_name += ".qrc"
        if not self.collection.dirty() and self.collection.file_name().startswith("Unnamed"):
            # current window is unused: adopt the new file name in place
            self.collection.set_file_name(file_name)
            self.update_ui()
        else:
            QrcEditor(file_name).show()
def file_open(self):
    """Create the dialog to select and then open a qrc file.

    Loads into the current window when it holds a pristine unnamed
    collection; otherwise opens a new editor window. Files already open
    in another window (self.is_open) are not re-opened.
    """
    # start browsing from the current file's directory when known
    file_dir = os.path.dirname(self.collection.file_name())\
        if self.collection.file_name() is not None else "."
    file_name, _ = QFileDialog.getOpenFileName(self, "QRC Editor - Load Resource Collection File",
                                               file_dir, "Resource Collection file (*.qrc)")
    if file_name:
        if file_name[-4:].lower() != ".qrc":
            file_name += ".qrc"
        if not self.is_open(file_name):
            if not self.collection.dirty() and self.collection.file_name().startswith("Unnamed"):
                _, message = self.collection.load(file_name)
                self.statusBar().showMessage(message, 5000)
            else:
                QrcEditor(file_name).show()
    self.update_widget()
    self.update_ui()
@staticmethod
def file_quit():
    """Close all the files and exit the application.

    NOTE(review): closeAllWindows() presumably triggers each window's
    closeEvent so unsaved changes can prompt the user — confirm the
    closeEvent implementation.
    """
    QApplication.closeAllWindows()
def file_save(self):
    """Save the collection, delegating to Save As for unnamed files.

    Returns:
        The boolean result of the save operation.

    BUG FIX: the "Unnamed" branch previously discarded file_save_as()'s
    return value, so this method returned None (falsy) even when the
    user saved successfully — now the result is propagated.
    """
    if self.collection.file_name().startswith("Unnamed"):
        return self.file_save_as()
    result, message = self.collection.save()
    self.statusBar().showMessage(message, 5000)
    self.update_ui()
    return result
def file_save_all(self):
    """Save all the open editors' collections that have unsaved changes.

    Shows each individual save message briefly, then a summary count.
    """
    count = 0
    for editor in QrcEditor.instances:
        if editor.collection.dirty():
            ok, message = editor.collection.save()
            if ok:
                count += 1
            self.statusBar().showMessage(message, 5000)
    self.statusBar().showMessage("Saved {0} of {1} files".format(count, len(QrcEditor.instances)), 5000)
    self.update_ui()
def file_save_as(self):
    """Create the dialog to save the collection under a new name.

    Returns:
        The boolean result of the save, or None if the dialog was
        cancelled (file_name empty).
    """
    file_name = self.collection.file_name() if self.collection.file_name() else "."
    file_name, _ = QFileDialog.getSaveFileName(self, "QRC Editor - Save Resource Collection File",
                                               file_name, "Resource Collection file (*.qrc)")
    if file_name:
        if file_name[-4:].lower() != ".qrc":
            file_name += ".qrc"
        result, message = self.collection.save(file_name)
        self.statusBar().showMessage(message, 5000)
        # refresh tables so file-existence colouring reflects the new base dir
        self.update_widget(self.central_widget.currentIndex())
        self.update_ui()
        return result
def help_about(self):
    """Open the about message box with version and environment details.

    FIX: corrected the user-visible typo "used in in python pyside2
    projects" in the about text.
    """
    message = """<b>QRC Editor</b> v {0}
    <p>Copyright © Sanfe Ltd.
    All rights reserved.
    <p>This application can be used to create and
    compile a resource collection file that can
    be used in Python PySide2 projects.
    <p> Python {1} - Qt {2} - PySide2 {3}
    """.format(__version__, platform.python_version(), PySide2.QtCore.__version__, PySide2.__version__)
    if self.rcc_version is not None:
        message += " - {0}".format(self.rcc_version)
    message += " on {0}.<p> Icons by <a href='https://icons8.com'>Icons8</a>".format(platform.system())
    QMessageBox.about(self, "About QRC Editor", message)
def load_settings(self):
    """Load persisted window geometry/state and rcc options from QSettings.

    Missing keys leave the corresponding self.options entries untouched,
    except "program" which falls back to "pyside2-rcc.exe" when absent or
    failing self.check_program.
    """
    settings = QSettings()
    if (geometry := settings.value("Geometry")) is not None:
        self.restoreGeometry(geometry)
    if (state := settings.value("MainWindow/State")) is not None:
        self.restoreState(state)
    if (program := settings.value("Options/Program")) and self.check_program(program):
        self.options["program"] = program
    else:
        self.options["program"] = "pyside2-rcc.exe"
    # QSettings round-trips booleans as the strings "true"/"false", so the
    # comparison itself yields the boolean — the former
    # `True if x == "true" else False` was a redundant anti-idiom.
    if (no_compress := settings.value("Options/NoCompress")) is not None:
        self.options["no_compress"] = no_compress == "true"
    if (compress := settings.value("Options/Compress")) is not None:
        self.options["compress"] = compress == "true"
    if (compress_level := settings.value("Options/CompressLevel")) is not None:
        self.options["compress_level"] = int(compress_level)
    if (threshold := settings.value("Options/Threshold")) is not None:
        self.options["threshold"] = threshold == "true"
    if (threshold_level := settings.value("Options/ThresholdLevel")) is not None:
        self.options["threshold_level"] = int(threshold_level)
def raise_window(self):
    """Raise and activate the editor window chosen from the window menu.

    The sender is the triggering action; its text has the form
    "<n> <title>", so split(maxsplit=1)[1] recovers the window title.
    """
    title = self.sender().text().split(maxsplit=1)[1]
    for editor in QrcEditor.instances:
        # windowTitle ends with the "[*]" modified marker; strip it to compare
        if editor.windowTitle()[:-3] == title:
            editor.activateWindow()
            editor.raise_()
            break
def update_table(self, table, resources, current_indexes=None):
    """Configure and populate a resource table.

    Parameters:
        table (QTableWidget): the table to populate.
        resources: the resources used to populate the table.
        current_indexes: indexes of the rows to re-select afterwards; may
            be an iterable of ints, a single int (negative means "none"),
            or None for no selection.

    Returns:
        QTableWidget: the populated table.

    FIXES: the default was a shared mutable list (`=[]`); a bare int (as
    passed by edit_update via table.currentRow()) previously crashed the
    selection loop with a TypeError.
    """
    if current_indexes is None:
        current_indexes = []
    elif isinstance(current_indexes, int):
        # currentRow() returns -1 when nothing is selected
        current_indexes = [current_indexes] if current_indexes >= 0 else []
    table.clearSelection()
    table.setRowCount(len(resources))
    table.setColumnCount(2)
    table.setHorizontalHeaderLabels(["Alias", "File"])
    table.setAlternatingRowColors(True)
    table.setEditTriggers(QTableWidget.NoEditTriggers)
    table.setSelectionBehavior(QTableWidget.SelectRows)
    table.setSelectionMode(QTableWidget.MultiSelection)
    table.setContextMenuPolicy(Qt.ActionsContextMenu)
    self.add_actions(table, (self.edit_paste_action, self.edit_copy_action, self.edit_cut_action,
                             self.edit_add_resource_action, self.edit_edit_resource_action,
                             self.edit_remove_resource_action, self.edit_move_up_action,
                             self.edit_move_down_action, self.edit_update_action))
    for row, resource in enumerate(resources):
        alias = QTableWidgetItem(resource.alias())
        file = QTableWidgetItem(resource.file())
        # duplicate aliases and missing files are shown in red
        if resources.is_duplicate(resource.alias()):
            alias.setTextColor(Qt.red)
        else:
            alias.setTextColor(Qt.black)
        if os.path.isfile(os.path.join(os.path.dirname(self.collection.file_name()), resource.file())):
            file.setTextColor(Qt.black)
        else:
            file.setTextColor(Qt.red)
        table.setItem(row, 0, alias)
        table.setItem(row, 1, file)
    table.resizeColumnsToContents()
    for index in current_indexes:
        table.selectRow(index)
    table.setFocus()
    return table
def update_ui(self):
    """Update the ui enabling and disabling actions.

    Derives the current state (file name, active table, selection, row
    and tab counts) and toggles every edit/file action accordingly; also
    refreshes the window title and its modified marker.
    """
    file_name_exist = (file_name := self.collection.file_name()) is not None
    table_exist = (table := self.central_widget.currentWidget()) is not None
    resource_selected = table_exist and len(table.selectionModel().selectedRows()) > 0
    multiple_rows = table_exist and table.rowCount() > 1
    multiple_tables = len(self.collection) > 1
    # "[*]" is Qt's placeholder for the window-modified indicator
    self.setWindowTitle("QRC Editor - {0}[*]".format(os.path.basename(file_name)))
    self.setWindowModified(self.collection.dirty())
    # tab-level actions need at least one tab
    if table_exist:
        self.edit_edit_tab_action.setEnabled(True)
        self.edit_remove_tab_action.setEnabled(True)
    else:
        self.edit_edit_tab_action.setEnabled(False)
        self.edit_remove_tab_action.setEnabled(False)
    # resource-level actions need a selection
    if resource_selected:
        self.edit_edit_resource_action.setEnabled(True)
        self.edit_remove_resource_action.setEnabled(True)
        self.edit_copy_action.setEnabled(True)
        self.edit_cut_action.setEnabled(True)
    else:
        self.edit_edit_resource_action.setEnabled(False)
        self.edit_remove_resource_action.setEnabled(False)
        self.edit_copy_action.setEnabled(False)
        self.edit_cut_action.setEnabled(False)
    # adding/compiling needs both a file on disk and a tab
    if file_name_exist and table_exist:
        self.edit_add_resource_action.setEnabled(True)
        self.file_compile_action.setEnabled(True)
    else:
        self.file_compile_action.setEnabled(False)
        self.edit_add_resource_action.setEnabled(False)
    # moving rows is possible only while the selection stays in bounds
    if multiple_rows and resource_selected:
        indexes = [selected.row() for selected in table.selectionModel().selectedRows()]
        self.edit_move_down_action.setEnabled(max(indexes) < table.rowCount() - 1)
        self.edit_move_up_action.setEnabled(min(indexes) > 0)
    else:
        self.edit_move_down_action.setEnabled(False)
        self.edit_move_up_action.setEnabled(False)
    # moving tabs left/right depends on the current tab's position
    if multiple_tables:
        self.edit_move_left_action.setEnabled((index := self.central_widget.currentIndex()) > 0)
        self.edit_move_right_action.setEnabled(index < len(self.collection) - 1)
    else:
        self.edit_move_left_action.setEnabled(False)
        self.edit_move_right_action.setEnabled(False)
    self.edit_sort_action.setEnabled(multiple_rows)
    self.edit_update_action.setEnabled(len(self.collection) > 0)
def update_widget(self, current=None):
    """Update the central widget populating the tabs.

    Parameters:
        current (int): the index of the tab to re-select, or None to keep
            the default tab.

    BUG FIX: the final guard was `if current:` which silently ignored a
    valid index of 0; it now tests `current is not None`.
    """
    self.central_widget.clear()
    for index, resources in enumerate(self.collection):
        title = ""
        if index < 10:
            # "&N" gives the tab an Alt+N keyboard accelerator
            title += "&{0} - Lang: ".format(index)
        else:
            title += "{0} - Lang: ".format(index)
        language = resources.language() if resources.language() is not None else "Default"
        title += language
        if resources.prefix() is not None:
            title += " - Prefix: {0}".format(resources.prefix())
        table = QTableWidget()
        self.update_table(table, resources)
        table.itemSelectionChanged.connect(self.update_ui)
        table.itemDoubleClicked.connect(self.edit_edit_resource)
        # Return key edits the selected resource, like double-click
        QShortcut(QKeySequence("Return"), table, self.edit_edit_resource)
        self.central_widget.addTab(table, QIcon(":/icon.png"), title)
    if current is not None:
        self.central_widget.setCurrentIndex(current)
def update_window_menu(self):
"""Update the window | |
"""
This code is based on https://github.com/ekwebb/fNRI which in turn is based on https://github.com/ethanfetaya/NRI
(MIT licence)
"""
import numpy as np
import torch
from torch.utils.data.dataset import TensorDataset
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.autograd import Variable
from itertools import permutations, chain
from math import factorial
from os import path
def my_softmax(input, axis=1):
    """Softmax over an arbitrary axis, implemented by swapping that axis
    to position 0, normalizing there, and swapping back."""
    swapped = input.transpose(axis, 0).contiguous()
    # dim=0 targets the requested axis after the transpose
    normalized = F.softmax(swapped, dim=0)
    return normalized.transpose(axis, 0)
def binary_concrete(logits, tau=1, hard=False, eps=1e-10):
    """Sample a binary-concrete relaxation of `logits`; with hard=True the
    result is thresholded at 0.5 using the straight-through estimator so
    gradients still flow through the soft sample."""
    relaxed = binary_concrete_sample(logits, tau=tau, eps=eps)
    if not hard:
        return relaxed
    discrete = (relaxed > 0.5).float()
    # straight-through: forward value is discrete, gradient is relaxed's
    return Variable(discrete.data - relaxed.data) + relaxed
def binary_concrete_sample(logits, tau=1, eps=1e-10):
    """Draw a binary-concrete sample: add logistic noise to the logits and
    squash with a temperature-scaled sigmoid.

    FIX: `F.sigmoid` is deprecated in PyTorch (since 0.4.1) in favour of
    the numerically identical `torch.sigmoid`.
    """
    logistic_noise = sample_logistic(logits.size(), eps=eps)
    if logits.is_cuda:
        logistic_noise = logistic_noise.cuda()
    y = logits + Variable(logistic_noise)
    return torch.sigmoid(y / tau)
def sample_logistic(shape, eps=1e-10):
    """Sample from a standard logistic distribution via inverse-CDF of a
    uniform draw; eps guards the logs against 0 and 1."""
    noise = torch.rand(shape).float()
    return (noise + eps).log() - (1 - noise + eps).log()
def sample_gumbel(shape, eps=1e-10):
    """
    Sample from Gumbel(0, 1) via the double-log transform of a uniform
    draw; eps keeps both logarithms away from zero.
    Adapted from https://github.com/pytorch/pytorch/pull/3341 and
    https://github.com/ericjang/gumbel-softmax (MIT license).
    """
    uniform = torch.rand(shape).float()
    inner = torch.log(uniform + eps)
    return (eps - inner).log().neg()
def gumbel_softmax_sample(logits, tau=1, eps=1e-10):
    """
    Draw a sample from the Gumbel-Softmax distribution: add Gumbel(0,1)
    noise to the logits and apply a temperature-scaled softmax over the
    last axis.
    NOTE: adapted from https://github.com/pytorch/pytorch/pull/3341 and
    https://github.com/ericjang/gumbel-softmax (MIT license); the original
    commit URL was mangled by an automated redaction.
    """
    gumbel_noise = sample_gumbel(logits.size(), eps=eps)
    if logits.is_cuda:
        gumbel_noise = gumbel_noise.cuda()
    y = logits + Variable(gumbel_noise)
    # softmax over the class axis (last dimension)
    return my_softmax(y / tau, axis=-1)
def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):
    """
    Sample from the Gumbel-Softmax distribution and optionally discretize.
    NOTE: adapted from https://github.com/pytorch/pytorch/pull/3341 and
    https://github.com/ericjang/gumbel-softmax (MIT license); the original
    commit URL was mangled by an automated redaction.
    Args:
        logits: [batch_size, n_class] unnormalized log-probs
        tau: non-negative scalar temperature
        hard: if True, take argmax, but differentiate w.r.t. soft sample y
        eps: numerical-stability constant passed to the Gumbel sampler
    Returns:
        [batch_size, n_class] sample from the Gumbel-Softmax distribution.
        If hard=True, then the returned sample will be one-hot, otherwise it will
        be a probability distribution that sums to 1 across classes
    Constraints:
        - this implementation only works on batch_size x num_features tensor for now
    """
    y_soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)
    if hard:
        shape = logits.size()
        # index of the max class per row
        _, k = y_soft.data.max(-1)
        # straight-through trick, based on
        # https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
        y_hard = torch.zeros(*shape)
        if y_soft.is_cuda:
            y_hard = y_hard.cuda()
        y_hard = y_hard.zero_().scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)
        # this cool bit of code achieves two things:
        # - makes the output value exactly one-hot (since we add then
        #   subtract y_soft value)
        # - makes the gradient equal to y_soft gradient (since we strip
        #   all other gradients)
        y = Variable(y_hard - y_soft.data) + y_soft
    else:
        y = y_soft
    return y
def my_sigmoid(logits, hard=True, sharpness=1.0):
    """Sharpness-scaled sigmoid; with hard=True the output is rounded to
    {0,1} with a straight-through gradient equal to the soft sigmoid's."""
    soft = 1 / (1 + torch.exp(-sharpness * logits))
    if not hard:
        return soft
    rounded = torch.round(soft)
    # straight-through trick, based on
    # https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
    if soft.is_cuda:
        rounded = rounded.cuda()
    # forward value is exactly 0/1, gradient is the soft sigmoid's
    return Variable(rounded - soft.data) + soft
def binary_accuracy(output, labels):
    """Fraction of predictions (output > 0.5) that match the labels."""
    predictions = output > 0.5
    matches = predictions.type_as(labels).eq(labels).double().sum()
    return matches / len(labels)
def edge_type_encode(edges):
    """Map each distinct 'interaction strength' in `edges` to a unique
    integer code 0, 1, 2, ... in ascending value order.

    IMPROVEMENT: replaces the per-unique-value np.where accumulation loop
    with a single np.unique(..., return_inverse=True) call — identical
    codes (np.unique sorts ascending, matching the old loop order) in one
    vectorized pass.
    """
    _, inverse = np.unique(edges, return_inverse=True)
    # match the original float output (it accumulated into np.zeros)
    return inverse.reshape(edges.shape).astype(float)
def loader_edges_encode(edges, num_atoms):
    """Flatten per-layer [N, N] edge matrices to length-N^2 vectors,
    integer-encode the edge types, and drop the diagonal entries."""
    flat = np.reshape(edges, [edges.shape[0], edges.shape[1], num_atoms ** 2])
    flat = np.array(edge_type_encode(flat), dtype=np.int64)
    # linear indices of all off-diagonal cells of an N x N matrix
    off_diag_idx = np.ravel_multi_index(
        np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
        [num_atoms, num_atoms])
    return flat[:, :, off_diag_idx]
def loader_combine_edges(edges):
    """Combine per-layer edge codes [sims, layers, E] into one mixed-radix
    integer per edge, with layer 0 as the most significant digit."""
    n_layers = edges.shape[1]
    edge_types_list = [int(np.max(edges[:, i, :])) + 1 for i in range(n_layers)]
    # the radices must be non-increasing across layers
    assert edge_types_list == sorted(edge_types_list)[::-1]
    combined = np.zeros(edges[:, 0, :].shape)
    multiplier = 1
    for layer in reversed(range(n_layers)):
        combined += multiplier * edges[:, layer, :]
        multiplier *= edge_types_list[layer]
    return combined.astype('int')
def load_data_NRI(batch_size=1, sim_folder='', shuffle=True, data_folder='data'):
    """Load an NRI simulation dataset and wrap it in DataLoaders.

    Reads loc/vel/edges .npy files for the train/valid/test splits from
    data_folder/sim_folder, normalizes positions and velocities to
    [-1, 1] using the TRAINING min/max (so valid/test may fall slightly
    outside), and combines the per-layer edge codes into single labels.

    Returns:
        (train_loader, valid_loader, test_loader, loc_max, loc_min,
         vel_max, vel_min) — the extrema are returned so predictions can
        be un-normalized later.
    """
    # the edges numpy arrays below are [ num_sims, N, N ]
    loc_train = np.load(path.join(data_folder,sim_folder,'loc_train.npy'))
    vel_train = np.load(path.join(data_folder,sim_folder,'vel_train.npy'))
    edges_train = np.load(path.join(data_folder,sim_folder,'edges_train.npy'))
    loc_valid = np.load(path.join(data_folder,sim_folder,'loc_valid.npy'))
    vel_valid = np.load(path.join(data_folder,sim_folder,'vel_valid.npy'))
    edges_valid = np.load(path.join(data_folder,sim_folder,'edges_valid.npy'))
    loc_test = np.load(path.join(data_folder,sim_folder,'loc_test.npy'))
    vel_test = np.load(path.join(data_folder,sim_folder,'vel_test.npy'))
    edges_test = np.load(path.join(data_folder,sim_folder,'edges_test.npy'))
    # [num_samples, num_timesteps, num_dims, num_atoms]
    num_atoms = loc_train.shape[3]
    loc_max = loc_train.max()
    loc_min = loc_train.min()
    vel_max = vel_train.max()
    vel_min = vel_train.min()
    # Normalize to [-1, 1] using training-set extrema
    loc_train = (loc_train - loc_min) * 2 / (loc_max - loc_min) - 1
    vel_train = (vel_train - vel_min) * 2 / (vel_max - vel_min) - 1
    loc_valid = (loc_valid - loc_min) * 2 / (loc_max - loc_min) - 1
    vel_valid = (vel_valid - vel_min) * 2 / (vel_max - vel_min) - 1
    loc_test = (loc_test - loc_min) * 2 / (loc_max - loc_min) - 1
    vel_test = (vel_test - vel_min) * 2 / (vel_max - vel_min) - 1
    # Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]
    loc_train = np.transpose(loc_train, [0, 3, 1, 2])
    vel_train = np.transpose(vel_train, [0, 3, 1, 2])
    feat_train = np.concatenate([loc_train, vel_train], axis=3)
    loc_valid = np.transpose(loc_valid, [0, 3, 1, 2])
    vel_valid = np.transpose(vel_valid, [0, 3, 1, 2])
    feat_valid = np.concatenate([loc_valid, vel_valid], axis=3)
    loc_test = np.transpose(loc_test, [0, 3, 1, 2])
    vel_test = np.transpose(vel_test, [0, 3, 1, 2])
    feat_test = np.concatenate([loc_test, vel_test], axis=3)
    # integer-encode edge types and drop self-edges
    edges_train = loader_edges_encode(edges_train, num_atoms)
    edges_valid = loader_edges_encode(edges_valid, num_atoms)
    edges_test = loader_edges_encode(edges_test, num_atoms)
    # collapse the per-layer codes into one label per edge (NRI variant)
    edges_train = loader_combine_edges(edges_train)
    edges_valid = loader_combine_edges(edges_valid)
    edges_test = loader_combine_edges(edges_test)
    feat_train = torch.FloatTensor(feat_train)
    edges_train = torch.LongTensor(edges_train)
    feat_valid = torch.FloatTensor(feat_valid)
    edges_valid = torch.LongTensor(edges_valid)
    feat_test = torch.FloatTensor(feat_test)
    edges_test = torch.LongTensor(edges_test)
    train_data = TensorDataset(feat_train, edges_train)
    valid_data = TensorDataset(feat_valid, edges_valid)
    test_data = TensorDataset(feat_test, edges_test)
    train_data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=shuffle)
    valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
    test_data_loader = DataLoader(test_data, batch_size=batch_size)
    return train_data_loader, valid_data_loader, test_data_loader, loc_max, loc_min, vel_max, vel_min
def load_data_fNRI(batch_size=1, sim_folder='', shuffle=True, data_folder='data'):
    """Load an fNRI simulation dataset and wrap it in DataLoaders.

    Same pipeline as load_data_NRI except the per-layer edge codes are
    kept separate (no loader_combine_edges step), matching the factorised
    fNRI formulation.

    Returns:
        (train_loader, valid_loader, test_loader, loc_max, loc_min,
         vel_max, vel_min) — extrema from the TRAINING split, for later
        un-normalization.
    """
    # the edges numpy arrays below are [ num_sims, N, N ]
    loc_train = np.load(path.join(data_folder,sim_folder,'loc_train.npy'))
    vel_train = np.load(path.join(data_folder,sim_folder,'vel_train.npy'))
    edges_train = np.load(path.join(data_folder,sim_folder,'edges_train.npy'))
    loc_valid = np.load(path.join(data_folder,sim_folder,'loc_valid.npy'))
    vel_valid = np.load(path.join(data_folder,sim_folder,'vel_valid.npy'))
    edges_valid = np.load(path.join(data_folder,sim_folder,'edges_valid.npy'))
    loc_test = np.load(path.join(data_folder,sim_folder,'loc_test.npy'))
    vel_test = np.load(path.join(data_folder,sim_folder,'vel_test.npy'))
    edges_test = np.load(path.join(data_folder,sim_folder,'edges_test.npy'))
    # [num_samples, num_timesteps, num_dims, num_atoms]
    num_atoms = loc_train.shape[3]
    loc_max = loc_train.max()
    loc_min = loc_train.min()
    vel_max = vel_train.max()
    vel_min = vel_train.min()
    # Normalize to [-1, 1] using training-set extrema
    loc_train = (loc_train - loc_min) * 2 / (loc_max - loc_min) - 1
    vel_train = (vel_train - vel_min) * 2 / (vel_max - vel_min) - 1
    loc_valid = (loc_valid - loc_min) * 2 / (loc_max - loc_min) - 1
    vel_valid = (vel_valid - vel_min) * 2 / (vel_max - vel_min) - 1
    loc_test = (loc_test - loc_min) * 2 / (loc_max - loc_min) - 1
    vel_test = (vel_test - vel_min) * 2 / (vel_max - vel_min) - 1
    # Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]
    loc_train = np.transpose(loc_train, [0, 3, 1, 2])
    vel_train = np.transpose(vel_train, [0, 3, 1, 2])
    feat_train = np.concatenate([loc_train, vel_train], axis=3)
    loc_valid = np.transpose(loc_valid, [0, 3, 1, 2])
    vel_valid = np.transpose(vel_valid, [0, 3, 1, 2])
    feat_valid = np.concatenate([loc_valid, vel_valid], axis=3)
    loc_test = np.transpose(loc_test, [0, 3, 1, 2])
    vel_test = np.transpose(vel_test, [0, 3, 1, 2])
    feat_test = np.concatenate([loc_test, vel_test], axis=3)
    # integer-encode edge types and drop self-edges; layers stay separate
    edges_train = loader_edges_encode( edges_train, num_atoms )
    edges_valid = loader_edges_encode( edges_valid, num_atoms )
    edges_test = loader_edges_encode( edges_test, num_atoms )
    edges_train = torch.LongTensor(edges_train)
    edges_valid = torch.LongTensor(edges_valid)
    edges_test = torch.LongTensor(edges_test)
    feat_train = torch.FloatTensor(feat_train)
    feat_valid = torch.FloatTensor(feat_valid)
    feat_test = torch.FloatTensor(feat_test)
    train_data = TensorDataset(feat_train, edges_train)
    valid_data = TensorDataset(feat_valid, edges_valid)
    test_data = TensorDataset(feat_test, edges_test)
    train_data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=shuffle)
    valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
    test_data_loader = DataLoader(test_data, batch_size=batch_size)
    return train_data_loader, valid_data_loader, test_data_loader, loc_max, loc_min, vel_max, vel_min
def to_2d_idx(idx, num_cols):
    """Convert linear indices into (x, y) = (column, row) grid coordinates.

    IMPROVEMENT: uses integer floor division instead of
    np.floor(idx / float(num_cols)) + cast — same values, no float
    round-trip (and exact for indices beyond 2**53).
    """
    idx = np.array(idx, dtype=np.int64)
    y_idx = idx // num_cols
    x_idx = idx % num_cols
    return x_idx, y_idx
def encode_onehot(labels):
    """One-hot encode `labels`, assigning columns to the distinct label
    values in ascending order.

    FIX: iterating a bare `set` gives a hash-dependent, potentially
    non-deterministic column order (notably for string labels under hash
    randomization); `sorted` makes the class->column mapping stable
    across runs.
    """
    classes = sorted(set(labels))
    classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
                    enumerate(classes)}
    labels_onehot = np.array(list(map(classes_dict.get, labels)),
                             dtype=np.int32)
    return labels_onehot
def get_triu_indices(num_nodes):
    """Linear indices (row * n + col) of strictly-upper-triangular entries."""
    mask = torch.ones(num_nodes, num_nodes).triu() - torch.eye(num_nodes, num_nodes)
    rows, cols = mask.nonzero().t()
    return rows * num_nodes + cols
def get_tril_indices(num_nodes):
    """Linear indices (row * n + col) of strictly-lower-triangular entries."""
    mask = torch.ones(num_nodes, num_nodes).tril() - torch.eye(num_nodes, num_nodes)
    rows, cols = mask.nonzero().t()
    return rows * num_nodes + cols
def get_offdiag_indices(num_nodes):
    """Linear indices (row * n + col) of all off-diagonal entries."""
    mask = torch.ones(num_nodes, num_nodes) - torch.eye(num_nodes, num_nodes)
    rows, cols = mask.nonzero().t()
    return rows * num_nodes + cols
def get_triu_offdiag_indices(num_nodes):
"""Linear triu (upper) indices w.r.t. vector of off-diagonal elements."""
triu_idx = torch.zeros(num_nodes * num_nodes)
triu_idx[get_triu_indices(num_nodes)] = 1.
| |
# Repository: DangoMelon/turbo-octo-winner
import datetime
import os
import argopy
import geopandas as gpd
import gsw
import numpy as np
import pandas as pd
import xarray as xr
from argopy import DataFetcher as ArgoDataFetcher
from argopy import IndexFetcher as ArgoIndexFetcher
from dmelon.ocean.argo import build_dl, launch_shell
from geopandas.tools import sjoin
def findPointsInPolys(pandas_df, shape_df):
    """Spatially filter point records to those inside shape_df's polygons.

    Parameters:
        pandas_df: DataFrame with `longitude` and `latitude` columns.
        shape_df: GeoDataFrame of polygons to test against.

    Returns:
        GeoDataFrame of the rows of pandas_df whose point geometry falls
        within a polygon of shape_df (inner join keeps matches only).
    """
    # Create GeoDataFrame from pandas dataframe
    argo_geodf = gpd.GeoDataFrame(
        pandas_df,
        geometry=gpd.points_from_xy(
            pandas_df.longitude, pandas_df.latitude, crs="EPSG:4326"
        ),
    )
    # Make spatial join to filer out values outside the shapefile
    # NOTE(review): the `op=` keyword was renamed to `predicate=` in
    # geopandas >= 0.10 — confirm the pinned geopandas version.
    pointInPolys = sjoin(argo_geodf, shape_df, op="within", how="inner")
    return pointInPolys
def maskVariableShape(variable, shape):
    """Mask an xarray variable to the region described by `shape`.

    Subsets to lat 20S-0, lon 90W-70W and keeps only cells where
    shape.mask(...) == 0, i.e. cells assigned to the first region.
    NOTE(review): assumes `shape` exposes a regionmask-style .mask()
    returning region indices on the variable's grid — confirm.
    """
    return variable.where(
        shape.mask(variable.sel(lat=slice(-20, 0), lon=slice(-90, -70))) == 0
    )
# godas_clim = xr.open_dataset("godas_clim_month.nc").pottmp
# godas_zero = godas_clim.isel(level=0)
# godas_zero["level"] = 0
# godas_clim = xr.concat([godas_zero, godas_clim], dim="level")
# godas_clim
import cmocean as cmo
# import cartopy.crs as ccrs
# import cartopy.feature as cfeature
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import regionmask
# from dmelon.plotting import HQ_BORDER, format_latlon
### PLOT ###
def makePlot(
    psal,
    psal_raw,
    temp,
    temp_raw,
    sla,
    taux,
    tauy,
    ssta,
    latest_date,
    out_path="",
    depth=850,
):
    """Render the 4-panel coastal monitoring figure and save PNG/JPEG.

    Panels (top to bottom, sharing the latitude axis):
    a) SSTA line, b) SLA line + wind-stress arrows, c) ARGO temperature
    section, d) ARGO salinity section. Output files are
    ``CoastMVar200nm_<depth>.png`` / ``.jpeg`` under `out_path`.

    Args:
        psal, temp: gridded ARGO sections plotted vs LATITUDE and depth.
        psal_raw, temp_raw: raw profile data; their LATITUDE values mark
            float positions along the section edges.
        sla, taux, tauy, ssta: fields with time/lat/lon dims, averaged
            over time and longitude before plotting.
        latest_date: newest data timestamp, printed on the figure.
        out_path (str): directory where the images are written.
        depth (int): bottom depth [m] of the vertical sections.
    """
    fig = plt.figure(constrained_layout=True, figsize=(8, 8), dpi=300)
    spec = gridspec.GridSpec(ncols=1, nrows=4, figure=fig, height_ratios=[1, 1, 2, 2])
    # Axes order: f_ax3 = top panel "a" ... f_ax0 = bottom panel "d"
    f_ax0 = fig.add_subplot(spec[3, :])
    f_ax1 = fig.add_subplot(spec[2, :], sharex=f_ax0)
    f_ax2 = fig.add_subplot(spec[1, :], sharex=f_ax0)
    f_ax3 = fig.add_subplot(spec[0, :], sharex=f_ax0)
    ### SAL
    # Fill latitude gaps, then apply a 5-point running mean along latitude
    plot_data_smooth = (
        psal.interpolate_na(dim="LATITUDE")
        .rolling(LATITUDE=5, center=True, min_periods=1)
        .mean()
    )
    plot_data_smooth.plot.contourf(
        x="LATITUDE",
        vmin=33.5,
        vmax=35.1,
        cmap=cmo.cm.haline,
        levels=33,
        ax=f_ax0,
        yincrease=False,
        cbar_kwargs=dict(label="Salinity", pad=-0.09, ticks=np.arange(33.5, 35.2, 0.2)),
    )
    conts = plot_data_smooth.plot.contour(
        x="LATITUDE",
        vmin=33.5,
        vmax=35.1,
        levels=17,
        ax=f_ax0,
        colors="k",
        linewidths=0.2,
        yincrease=False,
    )
    # Drop the 34.9 label to avoid clutter near the highlighted isohalines
    lev = conts.levels.copy()
    lev = lev[lev != 34.9]
    f_ax0.clabel(conts, levels=lev, fontsize=7, inline_spacing=-7)
    conts = plot_data_smooth.plot.contour(
        x="LATITUDE",
        levels=[33.8, 34.8, 35.1],
        ax=f_ax0,
        colors="k",
        linewidths=0.8,
        yincrease=False,
    )
    f_ax0.clabel(conts, fontsize=6.73, inline=True, inline_spacing=-7)
    # Square markers flag profile latitudes along the top and bottom edges
    f_ax0.scatter(
        psal_raw.LATITUDE,
        np.full_like(psal_raw.LATITUDE, 0),
        c="k",
        s=5,
        marker="s",
        clip_on=False,
    )
    f_ax0.scatter(
        psal_raw.LATITUDE,
        np.full_like(psal_raw.LATITUDE, depth),
        c="k",
        s=5,
        marker="s",
        clip_on=False,
    )
    f_ax0.set_xlim(-20, -2)
    f_ax0.set_ylim(depth, 0)
    f_ax0.set_ylabel("Depth [m]")
    f_ax0.set_xlabel("Latitude")
    f_ax0.grid(ls="--", alpha=0.5)
    ### TEMP
    plot_data_smooth = (
        temp.interpolate_na(dim="LATITUDE")
        .rolling(LATITUDE=5, center=True, min_periods=1)
        .mean()
    )
    plot_data_smooth.plot.contourf(
        x="LATITUDE",
        vmin=4,
        vmax=25,
        cmap=cmo.cm.thermal,
        levels=22,
        ax=f_ax1,
        yincrease=False,
        cbar_kwargs=dict(label="Temperature [°C]", pad=-0.09),
    )
    conts = plot_data_smooth.plot.contour(
        x="LATITUDE",
        vmin=4,
        vmax=25,
        levels=22,
        ax=f_ax1,
        colors="k",
        linewidths=0.2,
        yincrease=False,
    )
    # conts = plot_data_smooth.plot.contour(
    #     x="LATITUDE",
    #     vmin=14,
    #     vmax=29,
    #     levels=[0],
    #     ax=f_ax1,
    #     colors="k",
    #     linewidths=1,
    #     yincrease=False,
    # )
    f_ax1.clabel(conts)
    f_ax1.scatter(
        temp_raw.LATITUDE,
        np.full_like(temp_raw.LATITUDE, 0),
        c="k",
        s=5,
        marker="s",
        clip_on=False,
    )
    f_ax1.scatter(
        temp_raw.LATITUDE,
        np.full_like(temp_raw.LATITUDE, depth),
        c="k",
        s=5,
        marker="s",
        clip_on=False,
    )
    f_ax1.set_ylim(depth, 0)
    f_ax1.set_ylabel("Depth [m]")
    f_ax1.set_xlabel("Latitude")
    f_ax1.grid(ls="--", alpha=0.5)
    ### REST
    # SLA averaged over time and longitude, converted m -> cm
    (sla.mean(dim=["time", "lon"]) * 100).plot(ax=f_ax2)
    f_ax2.axhline(ls="--", c="k", lw=0.5)
    f_ax2.set_yticks(np.arange(-5, 5.1, 2))
    f_ax2.set_ylim(-5, 5)
    f_ax2.set_ylabel("SLA [cm]")
    f_ax2.set_xlabel("Latitude")
    # Wind-stress arrows along the latitude axis; components scaled x100.
    # NOTE(review): taux/tauy are swapped and negated to rotate arrows into
    # the lat-on-x panel orientation — confirm against data conventions.
    Q = f_ax2.quiver(
        taux.lat[::2],
        np.full_like(taux.lat, 0)[::2],
        tauy.mean(dim=["time", "lon"])[::2] * 100,
        taux.mean(dim=["time", "lon"])[::2] * -100,
        units="xy",
        scale_units="xy",
        scale=1,
        width=0.05,
    )
    f_ax2.quiverkey(
        Q,
        0.92,
        0.85,
        1,
        r"$1x10^{-2} \frac{N}{m^2}$",
        labelpos="E",
        coordinates="axes",
        fontproperties=dict(size=7),
        labelsep=0.02,
    )
    f_ax2.text(0.885, 0.885, r"$\tau$", transform=f_ax2.transAxes)
    f_ax2.grid(ls="--", alpha=0.5)
    # Small compass rose drawn just outside the right edge (axes fraction)
    card_centerx = 1.06
    card_centery = 0.5
    da = 0.04
    arrowprops = dict(arrowstyle="fancy", facecolor="black")
    f_ax2.annotate(
        "",
        xy=(card_centerx + da, card_centery),
        xytext=(card_centerx, card_centery),
        arrowprops=arrowprops,
        xycoords="axes fraction",
    )
    f_ax2.annotate(
        "",
        xy=(card_centerx - da, card_centery),
        xytext=(card_centerx, card_centery),
        arrowprops=arrowprops,
        xycoords="axes fraction",
    )
    f_ax2.annotate(
        "",
        xy=(card_centerx, card_centery + da * 7),
        xytext=(card_centerx, card_centery),
        arrowprops=arrowprops,
        xycoords="axes fraction",
    )
    f_ax2.annotate(
        "",
        xy=(card_centerx, card_centery - da * 7),
        xytext=(card_centerx, card_centery),
        arrowprops=arrowprops,
        xycoords="axes fraction",
    )
    f_ax2.text(
        card_centerx + da,
        card_centery,
        "N",
        transform=f_ax2.transAxes,
        va="center",
        ha="left",
    )
    f_ax2.text(
        card_centerx - da,
        card_centery,
        "S",
        transform=f_ax2.transAxes,
        va="center",
        ha="right",
    )
    f_ax2.text(
        card_centerx,
        card_centery + da * 7,
        "W",
        transform=f_ax2.transAxes,
        va="bottom",
        ha="center",
    )
    f_ax2.text(
        card_centerx,
        card_centery - da * 7,
        "E",
        transform=f_ax2.transAxes,
        va="top",
        ha="center",
    )
    # SSTA: time/lon mean then a 10-point running mean along latitude
    ssta.mean(dim=["time", "lon"]).rolling(
        lat=10, min_periods=1, center=True
    ).mean().plot(ax=f_ax3)
    f_ax3.set_ylabel("SSTA [°C]")
    f_ax3.set_xlabel("Latitude")
    f_ax3.set_yticks(np.arange(-3.5, 3.51, 1))
    f_ax3.set_ylim(-3.5, 3.5)
    f_ax3.axhline(ls="--", c="k", lw=0.5)
    f_ax3.grid(ls="--", alpha=0.5)
    # Panel letter labels a-d
    props = dict(boxstyle="round", facecolor="wheat", alpha=0.2)
    f_ax0.text(
        0.03,
        0.95,
        "d",
        transform=f_ax0.transAxes,
        bbox=props,
        verticalalignment="top",
        horizontalalignment="right",
    )
    f_ax1.text(
        0.03,
        0.95,
        "c",
        transform=f_ax1.transAxes,
        bbox=props,
        verticalalignment="top",
        horizontalalignment="right",
    )
    f_ax2.text(
        0.03,
        0.9,
        "b",
        transform=f_ax2.transAxes,
        bbox=props,
        verticalalignment="top",
        horizontalalignment="right",
    )
    f_ax3.text(
        0.03,
        0.9,
        "a",
        transform=f_ax3.transAxes,
        bbox=props,
        verticalalignment="top",
        horizontalalignment="right",
    )
    # Figure captions: data sources (left) and climatologies (right)
    f_ax3.text(
        0,
        1.65,
        "[a] OSTIA Sea Surface Temperature Anomaly\n"
        "[b] (Line) DUACS L4 Sea Level Anomaly\n"
        "      (Arrows) ASCAT L3 Wind Stress Anomaly",
        transform=f_ax3.transAxes,
        verticalalignment="top",
        horizontalalignment="left",
    )
    f_ax3.text(
        0.6,
        1.65,
        "Clim: GODAS 1981-2010\n"
        "Clim: DUACS L4 1993-2010\n"
        "Clim: ASCAT - ERA adjusted 2008-2014\n",
        transform=f_ax3.transAxes,
        verticalalignment="top",
        horizontalalignment="left",
    )
    f_ax0.text(
        0,
        -0.3,
        "[c] ARGO Vertical Temperature\n" "[d] ARGO Vertical Practical Salinity",
        transform=f_ax0.transAxes,
        verticalalignment="top",
        horizontalalignment="left",
    )
    # f_ax0.text(
    #     0.6,
    #     -0.3,
    #     "Clim: IMARPE 1981-2020",
    #     transform=f_ax0.transAxes,
    #     verticalalignment="top",
    #     horizontalalignment="left",
    # )
    f_ax0.text(
        0,
        -0.15,
        "Processing: IGP",
        transform=f_ax0.transAxes,
        verticalalignment="top",
        horizontalalignment="left",
        fontsize=9,
    )
    f_ax0.text(
        1,
        -0.15,
        f"Latest Date: {pd.to_datetime(latest_date.data):%d-%b-%Y}",
        transform=f_ax0.transAxes,
        verticalalignment="top",
        horizontalalignment="right",
        fontsize=9,
    )
    f_ax0.text(
        1,
        -0.4,
        f"*All plots shown are 30-day average of data points\n within 200nm from the coast",
        transform=f_ax0.transAxes,
        verticalalignment="top",
        horizontalalignment="right",
        fontsize=9,
    )
    fig.savefig(os.path.join(out_path, f"CoastMVar200nm_{depth}.png"))
    fig.savefig(os.path.join(out_path, f"CoastMVar200nm_{depth}.jpeg"), dpi=200)
### PLOT ANOM ###
def makePlot_anom(
    psal,
    psal_raw,
    temp,
    temp_raw,
    sla,
    taux,
    tauy,
    ssta,
    latest_date,
    out_path="",
    depth=850,
):
    """Anomaly variant of :func:`makePlot`.

    Same 4-panel layout, but panel c shows the ARGO temperature ANOMALY
    (diverging RdBu_r colormap with a highlighted zero contour) instead of
    absolute temperature, and output files are named
    ``CoastMVar200nm_anom_<depth>.png`` / ``.jpeg``.

    Args:
        psal, temp: gridded ARGO sections (temp is an anomaly field).
        psal_raw, temp_raw: raw profile data; LATITUDE marks float positions.
        sla, taux, tauy, ssta: fields with time/lat/lon dims, averaged
            over time and longitude before plotting.
        latest_date: newest data timestamp, printed on the figure.
        out_path (str): directory where the images are written.
        depth (int): bottom depth [m] of the vertical sections.
    """
    fig = plt.figure(constrained_layout=True, figsize=(8, 8), dpi=300)
    spec = gridspec.GridSpec(ncols=1, nrows=4, figure=fig, height_ratios=[1, 1, 2, 2])
    # Axes order: f_ax3 = top panel "a" ... f_ax0 = bottom panel "d"
    f_ax0 = fig.add_subplot(spec[3, :])
    f_ax1 = fig.add_subplot(spec[2, :], sharex=f_ax0)
    f_ax2 = fig.add_subplot(spec[1, :], sharex=f_ax0)
    f_ax3 = fig.add_subplot(spec[0, :], sharex=f_ax0)
    ### SAL
    # Fill latitude gaps, then apply a 5-point running mean along latitude
    plot_data_smooth = (
        psal.interpolate_na(dim="LATITUDE")
        .rolling(LATITUDE=5, center=True, min_periods=1)
        .mean()
    )
    plot_data_smooth.plot.contourf(
        x="LATITUDE",
        vmin=33.5,
        vmax=35.1,
        cmap=cmo.cm.haline,
        levels=33,
        ax=f_ax0,
        yincrease=False,
        cbar_kwargs=dict(label="Salinity", pad=-0.09, ticks=np.arange(33.5, 35.2, 0.2)),
    )
    conts = plot_data_smooth.plot.contour(
        x="LATITUDE",
        vmin=33.5,
        vmax=35.1,
        levels=17,
        ax=f_ax0,
        colors="k",
        linewidths=0.2,
        yincrease=False,
    )
    # Drop the 34.9 label to avoid clutter near the highlighted isohalines
    lev = conts.levels.copy()
    lev = lev[lev != 34.9]
    f_ax0.clabel(conts, levels=lev, fontsize=7, inline_spacing=-7)
    conts = plot_data_smooth.plot.contour(
        x="LATITUDE",
        levels=[33.8, 34.8, 35.1],
        ax=f_ax0,
        colors="k",
        linewidths=0.8,
        yincrease=False,
    )
    f_ax0.clabel(conts, fontsize=6.73, inline=True, inline_spacing=-7)
    # Square markers flag profile latitudes along the top and bottom edges
    f_ax0.scatter(
        psal_raw.LATITUDE,
        np.full_like(psal_raw.LATITUDE, 0),
        c="k",
        s=5,
        marker="s",
        clip_on=False,
    )
    f_ax0.scatter(
        psal_raw.LATITUDE,
        np.full_like(psal_raw.LATITUDE, depth),
        c="k",
        s=5,
        marker="s",
        clip_on=False,
    )
    f_ax0.set_xlim(-20, -2)
    f_ax0.set_ylim(depth, 0)
    f_ax0.set_ylabel("Depth [m]")
    f_ax0.set_xlabel("Latitude")
    f_ax0.grid(ls="--", alpha=0.5)
    ### TEMP
    plot_data_smooth = (
        temp.interpolate_na(dim="LATITUDE")
        .rolling(LATITUDE=5, center=True, min_periods=1)
        .mean()
    )
    # Diverging colormap centered at zero for the anomaly field
    plot_data_smooth.plot.contourf(
        x="LATITUDE",
        vmin=-3,
        vmax=3,
        cmap="RdBu_r",
        levels=13,
        ax=f_ax1,
        yincrease=False,
        cbar_kwargs=dict(label="Temperature Anomaly [°C]", pad=-0.09),
    )
    conts = plot_data_smooth.plot.contour(
        x="LATITUDE",
        vmin=-3,
        vmax=3,
        levels=13,
        ax=f_ax1,
        colors="k",
        linewidths=0.2,
        yincrease=False,
    )
    # Emphasize the zero-anomaly contour
    conts = plot_data_smooth.plot.contour(
        x="LATITUDE",
        vmin=-3,
        vmax=3,
        levels=[0],
        ax=f_ax1,
        colors="k",
        linewidths=1,
        yincrease=False,
    )
    f_ax1.clabel(conts)
    f_ax1.scatter(
        temp_raw.LATITUDE,
        np.full_like(temp_raw.LATITUDE, 0),
        c="k",
        s=5,
        marker="s",
        clip_on=False,
    )
    f_ax1.scatter(
        temp_raw.LATITUDE,
        np.full_like(temp_raw.LATITUDE, depth),
        c="k",
        s=5,
        marker="s",
        clip_on=False,
    )
    f_ax1.set_ylim(depth, 0)
    f_ax1.set_ylabel("Depth [m]")
    f_ax1.set_xlabel("Latitude")
    f_ax1.grid(ls="--", alpha=0.5)
    ### REST
    # SLA averaged over time and longitude, converted m -> cm
    (sla.mean(dim=["time", "lon"]) * 100).plot(ax=f_ax2)
    f_ax2.axhline(ls="--", c="k", lw=0.5)
    f_ax2.set_yticks(np.arange(-5, 5.1, 2))
    f_ax2.set_ylim(-5, 5)
    f_ax2.set_ylabel("SLA [cm]")
    f_ax2.set_xlabel("Latitude")
    # Wind-stress arrows along the latitude axis; components scaled x100.
    # NOTE(review): taux/tauy are swapped and negated to rotate arrows into
    # the lat-on-x panel orientation — confirm against data conventions.
    Q = f_ax2.quiver(
        taux.lat[::2],
        np.full_like(taux.lat, 0)[::2],
        tauy.mean(dim=["time", "lon"])[::2] * 100,
        taux.mean(dim=["time", "lon"])[::2] * -100,
        units="xy",
        scale_units="xy",
        scale=1,
        width=0.05,
    )
    f_ax2.quiverkey(
        Q,
        0.92,
        0.85,
        1,
        r"$1x10^{-2} \frac{N}{m^2}$",
        labelpos="E",
        coordinates="axes",
        fontproperties=dict(size=7),
        labelsep=0.02,
    )
    f_ax2.text(0.885, 0.885, r"$\tau$", transform=f_ax2.transAxes)
    f_ax2.grid(ls="--", alpha=0.5)
    # Small compass rose drawn just outside the right edge (axes fraction)
    card_centerx = 1.06
    card_centery = 0.5
    da = 0.04
    arrowprops = dict(arrowstyle="fancy", facecolor="black")
    f_ax2.annotate(
        "",
        xy=(card_centerx + da, card_centery),
        xytext=(card_centerx, card_centery),
        arrowprops=arrowprops,
        xycoords="axes fraction",
    )
    f_ax2.annotate(
        "",
        xy=(card_centerx - da, card_centery),
        xytext=(card_centerx, card_centery),
        arrowprops=arrowprops,
        xycoords="axes fraction",
    )
    f_ax2.annotate(
        "",
        xy=(card_centerx, card_centery + da * 7),
        xytext=(card_centerx, card_centery),
        arrowprops=arrowprops,
        xycoords="axes fraction",
    )
    f_ax2.annotate(
        "",
        xy=(card_centerx, card_centery - da * 7),
        xytext=(card_centerx, card_centery),
        arrowprops=arrowprops,
        xycoords="axes fraction",
    )
    f_ax2.text(
        card_centerx + da,
        card_centery,
        "N",
        transform=f_ax2.transAxes,
        va="center",
        ha="left",
    )
    f_ax2.text(
        card_centerx - da,
        card_centery,
        "S",
        transform=f_ax2.transAxes,
        va="center",
        ha="right",
    )
    f_ax2.text(
        card_centerx,
        card_centery + da * 7,
        "W",
        transform=f_ax2.transAxes,
        va="bottom",
        ha="center",
    )
    f_ax2.text(
        card_centerx,
        card_centery - da * 7,
        "E",
        transform=f_ax2.transAxes,
        va="top",
        ha="center",
    )
    # SSTA: time/lon mean then a 10-point running mean along latitude
    ssta.mean(dim=["time", "lon"]).rolling(
        lat=10, min_periods=1, center=True
    ).mean().plot(ax=f_ax3)
    f_ax3.set_ylabel("SSTA [°C]")
    f_ax3.set_xlabel("Latitude")
    f_ax3.set_yticks(np.arange(-3.5, 3.51, 1))
    f_ax3.set_ylim(-3.5, 3.5)
    f_ax3.axhline(ls="--", c="k", lw=0.5)
    f_ax3.grid(ls="--", alpha=0.5)
    # Panel letter labels a-d
    props = dict(boxstyle="round", facecolor="wheat", alpha=0.2)
    f_ax0.text(
        0.03,
        0.95,
        "d",
        transform=f_ax0.transAxes,
        bbox=props,
        verticalalignment="top",
        horizontalalignment="right",
    )
    f_ax1.text(
        0.03,
        0.95,
        "c",
        transform=f_ax1.transAxes,
        bbox=props,
        verticalalignment="top",
        horizontalalignment="right",
    )
    f_ax2.text(
        0.03,
        0.9,
        "b",
        transform=f_ax2.transAxes,
        bbox=props,
        verticalalignment="top",
        horizontalalignment="right",
    )
    f_ax3.text(
        0.03,
        0.9,
        "a",
        transform=f_ax3.transAxes,
        bbox=props,
        verticalalignment="top",
        horizontalalignment="right",
    )
    # Figure captions: data sources (left) and climatologies (right)
    f_ax3.text(
        0,
        1.65,
        "[a] OSTIA Sea Surface Temperature Anomaly\n"
        "[b] (Line) DUACS L4 Sea Level Anomaly\n"
        "      (Arrows) ASCAT L3 Wind Stress Anomaly",
        transform=f_ax3.transAxes,
        verticalalignment="top",
        horizontalalignment="left",
    )
    f_ax3.text(
        0.6,
        1.65,
        "Clim: GODAS 1981-2010\n"
        "Clim: DUACS L4 1993-2010\n"
        "Clim: ASCAT - ERA adjusted 2008-2014\n",
        transform=f_ax3.transAxes,
        verticalalignment="top",
        horizontalalignment="left",
    )
    f_ax0.text(
        0,
        -0.3,
        "[c] ARGO Vertical Temperature Anomaly\n"
        "[d] ARGO Vertical Practical Salinity",
        transform=f_ax0.transAxes,
        verticalalignment="top",
        horizontalalignment="left",
    )
    f_ax0.text(
        0.6,
        -0.3,
        "Clim: IMARPE 1981-2020",
        transform=f_ax0.transAxes,
        verticalalignment="top",
        horizontalalignment="left",
    )
    f_ax0.text(
        0,
        -0.15,
        "Processing: IGP",
        transform=f_ax0.transAxes,
        verticalalignment="top",
        horizontalalignment="left",
        fontsize=9,
    )
    f_ax0.text(
        1,
        -0.15,
        f"Latest Date: {pd.to_datetime(latest_date.data):%d-%b-%Y}",
        transform=f_ax0.transAxes,
        verticalalignment="top",
        horizontalalignment="right",
        fontsize=9,
    )
    f_ax0.text(
        1,
        -0.4,
        f"*All plots shown are 30-day average of data points\n within 200nm from the coast",
        transform=f_ax0.transAxes,
        verticalalignment="top",
        horizontalalignment="right",
        fontsize=9,
    )
    fig.savefig(os.path.join(out_path, f"CoastMVar200nm_anom_{depth}.png"))
    fig.savefig(os.path.join(out_path, f"CoastMVar200nm_anom_{depth}.jpeg"), dpi=200)
if __name__ == "__main__":
### LOAD DATASETS ###
OUTPUT = "/data/users/service/ARGO/FLOATS/output/ARGO-plots"
# Date and region bounds
region = [-90, -70, -20, -2.5]
today = datetime.datetime.today()
idate | |
cmds.nodeType(input_value) == 'multiplyDivide':
new_multi.append(input_value)
if new_multi:
multi = new_multi
if not new_multi:
multi = []
attributes = self._get_message_attribute_with_prefix('multiply')
for attribute in attributes:
input_attr = attr.get_attribute_input('%s.%s' % (self.pose_control, attribute), node_only = True)
if input_attr:
inputs = attr.get_inputs(input_attr, node_only = True)
if not inputs:
multiplies.append(input_attr)
return multiplies
    def set_input(self, attribute):
        """
        Set the input into the weightInput of the no reader.
        No readers need to have a connection specified that tells the pose when to turn on.

        Args:
            attribute (str): The node.attribute name of a connection to feed into the no reader.
        """
        # Intentionally a no-op in this class; the body only documents the
        # contract for readers that do consume a weight input.
        pass
def add_pose(self, pose_name):
self._connect_pose(pose_name)
pose_inst = get_pose_instance(pose_name, self.pose_gr)
if pose_inst.get_type() == 'no reader':
pose_inst.set_weight(1)
def get_pose_index(self, pose):
attributes = self._get_pose_string_attributes()
inc = 0
for attribute in attributes:
stored_pose = self._get_named_string_attribute(attribute)
if stored_pose == pose:
return inc
inc += 1
def remove_pose(self, pose_name):
index = self.get_pose_index(pose_name)
pose = self.get_pose(index)
if index == None:
return
if pose != pose_name:
return
attributes = self._get_pose_string_attributes()
attribute = attributes[index]
attr.disconnect_attribute('%s.%s' % (self.pose_control, attribute))
cmds.setAttr('%s.pose%s' % (self.pose_control, (index+1)), '', type = 'string')
self.refresh_multiply_connections()
def get_pose(self, index):
if index == None:
return
pose_attributes = self._get_pose_string_attributes()
if not pose_attributes:
return
if index > (len(pose_attributes)-1):
return
pose = cmds.getAttr('%s.%s' % (self.pose_control, pose_attributes[index]))
return pose
def get_poses(self):
pose_count = self._get_pose_count()
poses = []
for pose_index in range(0, pose_count):
poses.append(self.get_pose(pose_index))
return poses
    def refresh_multiply_connections(self):
        """Rebuild the multiply connections: tear down the existing ones and
        reconnect so the network matches the current set of poses."""
        self._disconnect_multiplies()
        self._connect_multiplies()
def attach(self, outputs = None):
#super(PoseNoReader, self).attach(outputs)
if outputs:
self.reconnect_weight_outputs(outputs)
self.refresh_multiply_connections()
self._hide_meshes()
if self.sub_detach_dict:
for key in self.sub_detach_dict:
pose = get_pose_instance(key)
pose.attach(self.sub_detach_dict[pose])
self.sub_detach_dict = {}
    def detach(self):
        """Detach the pose: break the multiply connections, disconnect the
        weight outputs and unhide the pose meshes.

        Returns:
            The disconnected weight output connections (as returned by
            disconnect_weight_outputs), suitable for passing back to attach().
        """
        #super(PoseNoReader, self).detach()
        self._disconnect_multiplies()
        outputs = self.disconnect_weight_outputs()
        self._show_meshes()
        return outputs
def set_weight(self, value):
"""
Set the weight for no readers in the combo.
No readers have connections specified.
If no connection is specified and connected, this can set the weight.
Args:
value (float): The value to set the weight to.
"""
poses = self.get_poses()
for pose in poses:
pose_inst = get_pose_instance(pose, self.pose_gr)
if pose_inst:
pose_type = pose_inst.get_type()
if pose_type == 'no reader':
pose_inst.set_weight(value)
class PoseCone(PoseBase):
"""
This type of pose reads from a joint or transform, for the defined angle of influence.
"""
    def __init__(self, transform = None, description = 'pose'):
        """
        Args:
            transform (str): Name of the joint/transform the cone reads from.
                Spaces are replaced with underscores.
            description (str): Description passed to the base pose.
        """
        super(PoseCone, self).__init__(description)
        if transform:
            # sanitize: node names must not contain spaces
            transform = transform.replace(' ', '_')
        self.transform = transform
        # default driving axis; one of 'X', 'Y', 'Z'
        self.axis = 'X'
    def _pose_type(self):
        # type tag identifying this reader kind
        return 'cone'
def _get_color_for_axis(self):
if self.axis == 'X':
return 13
if self.axis == 'Y':
return 14
if self.axis == 'Z':
return 6
def _get_axis_rotation(self):
if self.axis == 'X':
return [0,0,-90]
if self.axis == 'Y':
return [0,0,0]
if self.axis == 'Z':
return [90,0,0]
def _get_twist_axis(self):
if self.axis == 'X':
return [0,1,0]
if self.axis == 'Y':
return [1,0,0]
if self.axis == 'Z':
return [1,0,0]
def _get_pose_axis(self):
if self.axis == 'X':
return [1,0,0]
if self.axis == 'Y':
return [0,1,0]
if self.axis == 'Z':
return [0,0,1]
    def _create_pose_control(self):
        """Build the cone pose control, snap it onto the driver transform and
        constrain it to the driver's parent (parent name is also stored on the
        control's `parent` string attribute)."""
        pose_control = super(PoseCone, self)._create_pose_control()
        self._position_control(pose_control)
        if self.transform:
            # match the control to the driving transform's position/orientation
            match = space.MatchSpace(self.transform, pose_control)
            match.translation_rotation()
            parent = cmds.listRelatives(self.transform, p = True)
            if parent:
                # follow the driver's parent; record it for later rebuilds
                cmds.parentConstraint(parent[0], pose_control, mo = True)
                cmds.setAttr('%s.parent' % pose_control, parent[0], type = 'string')
        return pose_control
    def _position_control(self, control = None):
        """Shape, orient, scale and color the pose control curve for the
        current axis.

        Args:
            control (str): Control transform to style; defaults to
                self.pose_control.
        """
        if not control:
            control = self.pose_control
        control = rigs_util.Control(control)
        control.set_curve_type('pin_point')
        # orient the pin shape along the driving axis
        control.rotate_shape(*self._get_axis_rotation())
        # pad the stored scale so the pin stays visible
        scale = self.scale + 5
        control.scale_shape(scale,scale,scale)
        control.color( self._get_color_for_axis() )
def _set_axis_vectors(self, pose_axis = None):
if not pose_axis:
pose_axis = self._get_pose_axis()
self._lock_axis_vector_attributes(False)
cmds.setAttr('%s.axisRotateX' % self.pose_control, pose_axis[0])
cmds.setAttr('%s.axisRotateY' % self.pose_control, pose_axis[1])
cmds.setAttr('%s.axisRotateZ' % self.pose_control, pose_axis[2])
twist_axis = self._get_twist_axis()
cmds.setAttr('%s.axisTwistX' % self.pose_control, twist_axis[0])
cmds.setAttr('%s.axisTwistY' % self.pose_control, twist_axis[1])
cmds.setAttr('%s.axisTwistZ' % self.pose_control, twist_axis[2])
self._lock_axis_vector_attributes(True)
def _lock_axis_vector_attributes(self, bool_value):
axis = ['X','Y','Z']
attributes = ['axisTwist', 'axisRotate']
for a in axis:
for attribute in attributes:
cmds.setAttr('%s.%s%s' % (self.pose_control, attribute, a), l = bool_value)
    def _create_attributes(self, control):
        """Add the cone reader's attributes to the pose control: blend
        weights, falloff ranges, the rotate/twist axis vectors (locked after
        creation) and bookkeeping string attributes."""
        super(PoseCone, self)._create_attributes(control)
        # blend weights and the twist on/off toggle
        cmds.addAttr(control, ln = 'translation', at = 'double', k = True, dv = 1)
        cmds.addAttr(control, ln = 'rotation', at = 'double', k = True, dv = 1)
        cmds.addAttr(control, ln = 'twistOffOn', at = 'double', k = True, dv = 1, min = 0, max = 1)
        # falloff ranges of the cone
        cmds.addAttr(control, ln = 'maxDistance', at = 'double', k = True, dv = 1)
        cmds.addAttr(control, ln = 'maxAngle', at = 'double', k = True, dv = 90)
        cmds.addAttr(control, ln = 'maxTwist', at = 'double', k = True, dv = 90)
        title = attr.MayaEnumVariable('AXIS_ROTATE')
        title.create(control)
        # per-component rotate axis vector, seeded from the current axis
        pose_axis = self._get_pose_axis()
        cmds.addAttr(control, ln = 'axisRotateX', at = 'double', k = True, dv = pose_axis[0])
        cmds.addAttr(control, ln = 'axisRotateY', at = 'double', k = True, dv = pose_axis[1])
        cmds.addAttr(control, ln = 'axisRotateZ', at = 'double', k = True, dv = pose_axis[2])
        title = attr.MayaEnumVariable('AXIS_TWIST')
        title.create(control)
        # per-component twist axis vector
        twist_axis = self._get_twist_axis()
        cmds.addAttr(control, ln = 'axisTwistX', at = 'double', k = True, dv = twist_axis[0])
        cmds.addAttr(control, ln = 'axisTwistY', at = 'double', k = True, dv = twist_axis[1])
        cmds.addAttr(control, ln = 'axisTwistZ', at = 'double', k = True, dv = twist_axis[2])
        # bookkeeping: names of the driving joint and (later) its parent
        cmds.addAttr(control, ln = 'joint', dt = 'string')
        if self.transform:
            cmds.setAttr('%s.joint' % control, self.transform, type = 'string')
        cmds.addAttr(control, ln = 'parent', dt = 'string')
        self._lock_axis_vector_attributes(True)
#--- math nodes
    def _create_distance_between(self):
        """Create a distanceBetween node measuring pose control vs driver
        transform (world matrices); returns the node name."""
        distance_between = self._create_node('distanceBetween')
        cmds.connectAttr('%s.worldMatrix' % self.pose_control,
                            '%s.inMatrix1' % distance_between)
        if self.transform:
            cmds.connectAttr('%s.worldMatrix' % self.transform,
                                '%s.inMatrix2' % distance_between)
        return distance_between
    def _create_multiply_matrix(self, moving_transform, pose_control):
        """multMatrix node computing the moving transform's world matrix
        relative to the pose control (world matrix x world inverse matrix);
        returns the node name."""
        multiply_matrix = self._create_node('multMatrix')
        if moving_transform:
            cmds.connectAttr('%s.worldMatrix' % moving_transform, '%s.matrixIn[0]' % multiply_matrix)
        cmds.connectAttr('%s.worldInverseMatrix' % pose_control, '%s.matrixIn[1]' % multiply_matrix)
        return multiply_matrix
    def _create_vector_matrix(self, multiply_matrix, vector):
        """vectorProduct node transforming `vector` by the matrix coming out
        of `multiply_matrix`; returns the node name."""
        vector_product = self._create_node('vectorProduct')
        cmds.connectAttr('%s.matrixSum' % multiply_matrix, '%s.matrix' % vector_product)
        cmds.setAttr('%s.input1X' % vector_product, vector[0])
        cmds.setAttr('%s.input1Y' % vector_product, vector[1])
        cmds.setAttr('%s.input1Z' % vector_product, vector[2])
        # operation 3 = "Vector Matrix Product" in Maya's vectorProduct node
        cmds.setAttr('%s.operation' % vector_product, 3)
        return vector_product
    def _create_angle_between(self, vector_product, vector):
        """angleBetween node measuring the transformed axis (from
        `vector_product`) against the fixed reference `vector`; returns the
        node name."""
        angle_between = self._create_node('angleBetween')
        cmds.connectAttr('%s.outputX' % vector_product, '%s.vector1X' % angle_between)
        cmds.connectAttr('%s.outputY' % vector_product, '%s.vector1Y' % angle_between)
        cmds.connectAttr('%s.outputZ' % vector_product, '%s.vector1Z' % angle_between)
        cmds.setAttr('%s.vector2X' % angle_between, vector[0])
        cmds.setAttr('%s.vector2Y' % angle_between, vector[1])
        cmds.setAttr('%s.vector2Z' % angle_between, vector[2])
        return angle_between
    def _remap_value_angle(self, angle_between):
        """remapValue node mapping the angle linearly: 0 deg -> 1, 180 deg -> 0.
        Returns the node name."""
        remap = self._create_node('remapValue', 'angle')
        cmds.connectAttr('%s.angle' % angle_between, '%s.inputValue' % remap)
        # ramp endpoints: position 0 outputs 1, position 1 outputs 0
        cmds.setAttr('%s.value[0].value_Position' % remap, 0)
        cmds.setAttr('%s.value[0].value_FloatValue' % remap, 1)
        cmds.setAttr('%s.value[1].value_Position' % remap, 1)
        cmds.setAttr('%s.value[1].value_FloatValue' % remap, 0)
        # the input range spans 0..180 degrees
        cmds.setAttr('%s.inputMax' % remap, 180)
        return remap
    def _remap_value_distance(self, distance_between):
        """remapValue node mapping distance linearly: 0 -> 1, inputMax -> 0.
        Returns the node name.

        NOTE(review): inputMax is left at 1 here — presumably driven/rescaled
        elsewhere via maxDistance; confirm before relying on the raw range.
        """
        remap = self._create_node('remapValue', 'distance')
        cmds.connectAttr('%s.distance' % distance_between, '%s.inputValue' % remap)
        cmds.setAttr('%s.value[0].value_Position' % remap, 0)
        cmds.setAttr('%s.value[0].value_FloatValue' % remap, 1)
        cmds.setAttr('%s.value[1].value_Position' % remap, 1)
        cmds.setAttr('%s.value[1].value_FloatValue' % remap, 0)
        cmds.setAttr('%s.inputMax' % remap, 1)
        return remap
    def _fix_remap_value_distance(self):
        """Repair setups where the distance remap feeding `translation` is
        missing: rebuild it from the stored distanceBetween node and wire its
        output into the anim curve driving translation.

        NOTE(review): the existence check uses the scene-global node name
        'remapValue3' — presumably the auto-numbered node from older builds;
        confirm this matches the scenes being repaired.
        """
        input_value = attr.get_attribute_input('%s.translation' % self.pose_control, node_only = True)
        key_input = attr.get_attribute_input('%s.input' % input_value)
        if key_input:
            # translation's driver already has an input; nothing to fix
            return
        if not cmds.objExists('remapValue3'):
            distance = self._get_named_message_attribute('distanceBetween1')
            remap = self._remap_value_distance(distance)
            input_value = attr.get_attribute_input('%s.translation' % self.pose_control, node_only = True)
            if input_value:
                # only re-wire when translation is driven by an anim curve
                if cmds.nodeType(input_value).startswith('animCurve'):
                    cmds.connectAttr('%s.outValue' % remap, '%s.input' % input_value)
def _multiply_remaps(self, remap, remap_twist):
multiply = self._create_node('multiplyDivide')
cmds.connectAttr('%s.outValue' % remap, '%s.input1X' % multiply)
cmds.connectAttr('%s.outValue' % remap_twist, '%s.input2X' % multiply)
blend = self._create_node('blendColors')
cmds.connectAttr('%s.outputX' % multiply, '%s.color1R' % blend)
cmds.connectAttr('%s.outValue' % remap, '%s.color2R' % blend)
cmds.connectAttr('%s.twistOffOn' % self.pose_control, ' %s.blender' % blend)
return blend
def _create_pose_math_nodes(self, multiply_matrix, axis):
vector_product = self._create_vector_matrix(multiply_matrix, axis)
angle_between = self._create_angle_between(vector_product, axis)
if self._get_pose_axis() == axis:
cmds.connectAttr('%s.axisRotateX' % self.pose_control, '%s.input1X' % vector_product)
cmds.connectAttr('%s.axisRotateY' % self.pose_control, '%s.input1Y' % vector_product)
cmds.connectAttr('%s.axisRotateZ' % self.pose_control, '%s.input1Z' % vector_product)
cmds.connectAttr('%s.axisRotateX' % self.pose_control, '%s.vector2X' % angle_between)
cmds.connectAttr('%s.axisRotateY' % self.pose_control, '%s.vector2Y' % angle_between)
cmds.connectAttr('%s.axisRotateZ' % self.pose_control, '%s.vector2Z' % angle_between)
if self._get_twist_axis() == axis:
cmds.connectAttr('%s.axisTwistX' % self.pose_control, '%s.input1X' % vector_product)
| |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import json
import re
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Sequence, Tuple, Union
from monai.bundle.config_item import ComponentLocator, ConfigComponent, ConfigExpression, ConfigItem
from monai.bundle.reference_resolver import ReferenceResolver
from monai.bundle.utils import ID_SEP_KEY, MACRO_KEY
from monai.config import PathLike
from monai.utils import ensure_tuple, look_up_option, optional_import
yaml, _ = optional_import("yaml")
__all__ = ["ConfigParser"]
class ConfigParser:
"""
The primary configuration parser. It traverses a structured config (in the form of nested Python dict or list),
creates ``ConfigItem``, and assign unique IDs according to the structures.
This class provides convenient access to the set of ``ConfigItem`` of the config by ID.
A typical workflow of config parsing is as follows:
- Initialize ``ConfigParser`` with the ``config`` source.
- Call ``get_parsed_content()`` to get expected component with `id`.
.. code-block:: python
from monai.bundle import ConfigParser
config = {
"my_dims": 2,
"dims_1": "$@my_dims + 1",
"my_xform": {"_target_": "LoadImage"},
"my_net": {"_target_": "BasicUNet", "spatial_dims": "@dims_1", "in_channels": 1, "out_channels": 4},
"trainer": {"_target_": "SupervisedTrainer", "network": "@my_net", "preprocessing": "@my_xform"}
}
# in the example $@my_dims + 1 is an expression, which adds 1 to the value of @my_dims
parser = ConfigParser(config)
# get/set configuration content, the set method should happen before calling parse()
print(parser["my_net"]["in_channels"]) # original input channels 1
parser["my_net"]["in_channels"] = 4 # change input channels to 4
print(parser["my_net"]["in_channels"])
# instantiate the network component
parser.parse(True)
net = parser.get_parsed_content("my_net", instantiate=True)
print(net)
# also support to get the configuration content of parsed `ConfigItem`
trainer = parser.get_parsed_content("trainer", instantiate=False)
print(trainer)
Args:
config: input config source to parse.
excludes: when importing modules to instantiate components,
excluding components from modules specified in ``excludes``.
globals: pre-import packages as global variables to ``ConfigExpression``,
so that expressions, for example, ``"$monai.data.list_data_collate"`` can use ``monai`` modules.
The current supported globals and alias names are
``{"monai": "monai", "torch": "torch", "np": "numpy", "numpy": "numpy"}``.
These are MONAI's minimal dependencies.
See also:
- :py:class:`monai.bundle.ConfigItem`
- :py:class:`monai.bundle.scripts.run`
"""
suffixes = ("json", "yaml", "yml")
suffix_match = rf".*\.({'|'.join(suffixes)})"
path_match = rf"({suffix_match}$)"
meta_key = "_meta_" # field key to save metadata
def __init__(
self,
config: Any = None,
excludes: Optional[Union[Sequence[str], str]] = None,
globals: Optional[Dict[str, Any]] = None,
):
self.config = None
self.globals: Dict[str, Any] = {}
globals = {"monai": "monai", "torch": "torch", "np": "numpy", "numpy": "numpy"} if globals is None else globals
if globals is not None:
for k, v in globals.items():
self.globals[k] = importlib.import_module(v) if isinstance(v, str) else v
self.locator = ComponentLocator(excludes=excludes)
self.ref_resolver = ReferenceResolver()
if config is None:
config = {self.meta_key: {}}
self.set(config=config)
    def __repr__(self):
        # show the underlying config structure directly
        return f"{self.config}"
def __getitem__(self, id: Union[str, int]):
"""
Get the config by id.
Args:
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
"""
if id == "":
return self.config
config = self.config
for k in str(id).split(self.ref_resolver.sep):
if not isinstance(config, (dict, list)):
raise ValueError(f"config must be dict or list for key `{k}`, but got {type(config)}: {config}.")
indexing = k if isinstance(config, dict) else int(k)
config = config[indexing]
return config
def __setitem__(self, id: Union[str, int], config: Any):
"""
Set config by ``id``. Note that this method should be used before ``parse()`` or ``get_parsed_content()``
to ensure the updates are included in the parsed content.
Args:
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
config: config to set at location ``id``.
"""
if id == "":
self.config = config
self.ref_resolver.reset()
return
keys = str(id).split(self.ref_resolver.sep)
# get the last parent level config item and replace it
last_id = self.ref_resolver.sep.join(keys[:-1])
conf_ = self[last_id]
indexing = keys[-1] if isinstance(conf_, dict) else int(keys[-1])
conf_[indexing] = config
self.ref_resolver.reset()
return
def get(self, id: str = "", default: Optional[Any] = None):
"""
Get the config by id.
Args:
id: id to specify the expected position. See also :py:meth:`__getitem__`.
default: default value to return if the specified ``id`` is invalid.
"""
try:
return self[id]
except KeyError:
return default
    def set(self, config: Any, id: str = ""):
        """
        Set config by ``id``. See also :py:meth:`__setitem__`.

        Args:
            config: config content to set.
            id: target position; defaults to the whole config.
        """
        self[id] = config
    def parse(self, reset: bool = True):
        """
        Recursively resolve `self.config` to replace the macro tokens with target content.
        Then recursively parse the config source, add every item as ``ConfigItem`` to the reference resolver.

        Args:
            reset: whether to reset the ``reference_resolver`` before parsing. Defaults to `True`.
        """
        if reset:
            self.ref_resolver.reset()
        # expand "%..." macro references before building ConfigItems
        self.resolve_macro()
        self._do_parse(config=self.get())
    def get_parsed_content(self, id: str = "", **kwargs):
        """
        Get the parsed result of ``ConfigItem`` with the specified ``id``.

        - If the item is ``ConfigComponent`` and ``instantiate=True``, the result is the instance.
        - If the item is ``ConfigExpression`` and ``eval_expr=True``, the result is the evaluated output.
        - Else, the result is the configuration content of `ConfigItem`.

        Args:
            id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
                go one level further into the nested structures.
                Use digits indexing from "0" for list or other strings for dict.
                For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
            kwargs: additional keyword arguments to be passed to ``_resolve_one_item``.
                Currently support ``reset`` (for parse), ``instantiate`` and ``eval_expr``. All defaulting to True.
        """
        if not self.ref_resolver.is_resolved():
            # not parsed the config source yet, parse it (lazy first access)
            self.parse(kwargs.get("reset", True))
        return self.ref_resolver.get_resolved_content(id=id, **kwargs)
def read_meta(self, f: Union[PathLike, Sequence[PathLike], Dict], **kwargs):
"""
Read the metadata from specified JSON or YAML file.
The metadata as a dictionary will be stored at ``self.config["_meta_"]``.
Args:
f: filepath of the metadata file, the content must be a dictionary,
if providing a list of files, wil merge the content of them.
if providing a dictionary directly, use it as metadata.
kwargs: other arguments for ``json.load`` or ``yaml.safe_load``, depends on the file format.
"""
self.set(self.load_config_files(f, **kwargs), self.meta_key)
def read_config(self, f: Union[PathLike, Sequence[PathLike], Dict], **kwargs):
"""
Read the config from specified JSON or YAML file.
The config content in the `self.config` dictionary.
Args:
f: filepath of the config file, the content must be a dictionary,
if providing a list of files, wil merge the content of them.
if providing a dictionary directly, use it as config.
kwargs: other arguments for ``json.load`` or ``yaml.safe_load``, depends on the file format.
"""
content = {self.meta_key: self.get(self.meta_key, {})}
content.update(self.load_config_files(f, **kwargs))
self.set(config=content)
    def _do_resolve(self, config: Any):
        """
        Recursively resolve the config content to replace the macro tokens with target content.
        The macro tokens start with "%", can be from another structured file, like:
        ``{"net": "%default_net"}``, ``{"net": "%/data/config.json#net"}``.

        Note: dict / list containers are mutated in place while resolving; the
        return value is the (possibly replaced) ``config`` itself.

        Args:
            config: input config content to resolve.
        """
        if isinstance(config, (dict, list)):
            # resolve children first, writing results back into the container
            for k, v in enumerate(config) if isinstance(config, list) else config.items():
                config[k] = self._do_resolve(v)
        if isinstance(config, str) and config.startswith(MACRO_KEY):
            # "%<path>#<id>": an empty path refers to this parser's own config
            path, ids = ConfigParser.split_path_id(config[len(MACRO_KEY) :])
            parser = ConfigParser(config=self.get() if not path else ConfigParser.load_config_file(path))
            # deepcopy so the macro replacement does not share state with its source
            return self._do_resolve(config=deepcopy(parser[ids]))
        return config
def resolve_macro(self):
"""
Recursively resolve `self.config` to replace the macro tokens with target content.
The macro tokens are marked as starting with "%", can be from another structured file, like:
``"%default_net"``, ``"%/data/config.json#net"``.
"""
self.set(self._do_resolve(config=deepcopy(self.get())))
def _do_parse(self, config, id: str = ""):
"""
Recursively parse the nested data in config source, add every item as `ConfigItem` to the resolver.
Args:
config: config source to parse.
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested | |
import loaddata
import pokemon_regression
import pokemon_stat_analysis
import pokemon_test_are_dragons_taller
import pokemon_normal_dist_and_actual_vals
# Presentation constants for console output (presumably used by the menu /
# reporting routines below; usage is outside this view — confirm).
separator_char = ", "
separator = '---------------------------------------------------------------'
tab: str = "\t"
def do_normal_dist_against_actual_values(options):
data_set, type_set, stat_set = options[0], options[1], options[2]
if data_set == "1": # all pokemon
set_name = "Pokemon"
modifier = ''
# grass pokemon
if type_set == "1":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.grass_types['total_points']
stat_stats = loaddata.grass_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['hp']
stat_stats = loaddata.grass_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['speed']
stat_stats = loaddata.grass_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['attack']
stat_stats = loaddata.grass_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['defense']
stat_stats = loaddata.grass_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['sp_attack']
stat_stats = loaddata.grass_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['sp_defense']
stat_stats = loaddata.grass_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.grass_types['height_m']
stat_stats = loaddata.grass_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.grass_types['weight_kg']
stat_stats = loaddata.grass_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fire pokemon
elif type_set == "2":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.fire_types['total_points']
stat_stats = loaddata.fire_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['hp']
stat_stats = loaddata.fire_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['speed']
stat_stats = loaddata.fire_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['attack']
stat_stats = loaddata.fire_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['defense']
stat_stats = loaddata.fire_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['sp_attack']
stat_stats = loaddata.fire_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['sp_defense']
stat_stats = loaddata.fire_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.fire_types['height_m']
stat_stats = loaddata.fire_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.fire_types['weight_kg']
stat_stats = loaddata.fire_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# water pokemon
elif type_set == "3":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.water_types['total_points']
stat_stats = loaddata.water_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.water_types['hp']
stat_stats = loaddata.water_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.water_types['speed']
stat_stats = loaddata.water_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.water_types['attack']
stat_stats = loaddata.water_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.water_types['defense']
stat_stats = loaddata.water_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.water_types['sp_attack']
stat_stats = loaddata.water_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.water_types['sp_defense']
stat_stats = loaddata.water_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.water_types['height_m']
stat_stats = loaddata.water_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.water_types['weight_kg']
stat_stats = loaddata.water_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# electric pokemon
elif type_set == "4":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.electric_types['total_points']
stat_stats = loaddata.electric_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['hp']
stat_stats = loaddata.electric_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['speed']
stat_stats = loaddata.electric_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['attack']
stat_stats = loaddata.electric_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['defense']
stat_stats = loaddata.electric_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['sp_attack']
stat_stats = loaddata.electric_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['sp_defense']
stat_stats = loaddata.electric_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.electric_types['height_m']
stat_stats = loaddata.electric_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.electric_types['weight_kg']
stat_stats = loaddata.electric_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# psychic pokemon
elif type_set == "5":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.psychic_types['total_points']
stat_stats = loaddata.psychic_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['hp']
stat_stats = loaddata.psychic_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['speed']
stat_stats = loaddata.psychic_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['attack']
stat_stats = loaddata.psychic_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['defense']
stat_stats = loaddata.psychic_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['sp_attack']
stat_stats = loaddata.psychic_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['sp_defense']
stat_stats = loaddata.psychic_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.psychic_types['height_m']
stat_stats = loaddata.psychic_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.psychic_types['weight_kg']
stat_stats = loaddata.psychic_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ice pokemon
elif type_set == "6":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.ice_types['total_points']
stat_stats = loaddata.ice_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['hp']
stat_stats = loaddata.ice_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['speed']
stat_stats = loaddata.ice_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['attack']
stat_stats = loaddata.ice_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['defense']
stat_stats = loaddata.ice_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['sp_attack']
stat_stats = loaddata.ice_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['sp_defense']
stat_stats = loaddata.ice_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.ice_types['height_m']
stat_stats = loaddata.ice_types['height_m'].describe()
unit = '(m)'
elif stat_set == | |
import requests
import xml.etree.ElementTree as ET
from typing import List
from typing import Union
from datetime import date
from datetime import datetime
from pysec.parser import EDGARParser
# https://www.sec.gov/cgi-bin/srch-edgar?text=form-type%3D%2810-q*+OR+10-k*%29&first=2020&last=2020
class EDGARQuery():
    def __init__(self):
        """Initializes the EDGAR Client with the different endpoints used."""
        # base URL for the SEC EDGAR browser
        self.sec_url = "https://www.sec.gov"
        # filing archive root (filing directories and documents)
        self.archive_service = "https://www.sec.gov/Archives/edgar"
        # company browse endpoint (`action=getcompany` queries)
        self.browse_service = "https://www.sec.gov/cgi-bin/browse-edgar"
        # issuer / ownership display endpoint
        self.issuer_service = "https://www.sec.gov/cgi-bin/own-disp"
        # full-text search endpoint
        self.search_service = "https://www.sec.gov/cgi-bin/srch-edgar"
        # mutual fund series endpoint
        self.series_service = "https://www.sec.gov/cgi-bin/series"
        # current events endpoint
        self.current_service = "https://www.sec.gov/cgi-bin/current"
        # generic CGI root
        self.sec_cgi_endpoint = "https://www.sec.gov/cgi-bin"
        # tool path fragments — presumably appended to `sec_cgi_endpoint`
        # by callers outside this view; confirm. (NOTE: "mutal" typo is part
        # of the public attribute name, so it is kept for compatibility.)
        self.cik_lookup = 'cik_lookup'
        self.mutal_fund_search = 'series'
        # parser that turns EDGAR XML/Atom responses into dictionaries
        self.parser_client = EDGARParser()
def get_sec_datasets(self) -> dict:
"""Grabs all the Public datasets provided by the SEC.
Returns:
----
dict: A collection of SEC datasets.
Usage:
----
>>> edgar_client = EDGARQuery()
>>> sec_datasets = edgar_client.get_sec_datasets()
{
"@context": "https://project-open-data.cio.gov/v1.1/schema/catalog.jsonld",
"@id": "https://www.sec.gov/data.json",
"@type": "dcat:Catalog",
"conformsTo": "https://project-open-data.cio.gov/v1.1/schema",
"describedBy": "https://project-open-data.cio.gov/v1.1/schema/catalog.json",
"dataset": []
}
"""
# Make the request.
response = requests.get(
url='https://www.sec.gov/data.json'
)
if response.ok:
return response.json()
def get_edgar_taxonomies(self) -> dict:
"""Grabs all the Public taxonomies datasets provided by the SEC.
Returns:
----
dict: A collection of Taxonomy files for the SEC.
Usage:
----
>>> edgar_client = EDGARQuery()
>>> sec_datasets = edgar_client.get_edgar_taxonomies()
[
{
'AttType': 'SCH',
'Elements': '0',
'Family': 'BASE',
'FileTypeName': 'Schema',
'Href': 'http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd',
'Namespace': 'http://www.xbrl.org/2003/linkbase',
'Prefix': 'link',
'Version': '2010'
},
{
'AttType': 'SCH',
'Elements': '0',
'Family': 'BASE',
'FileTypeName': 'Schema',
'Href': 'http://www.xbrl.org/2003/xbrl-instance-2003-12-31.xsd',
'Namespace': 'http://www.xbrl.org/2003/instance',
'Prefix': 'xbrli',
'Version': '2010'
}
]
"""
# Make the request.
response = requests.get(
url='https://www.sec.gov/info/edgar/edgartaxonomies.xml'
)
# Parse the response.
taxonomies = self.parser_client.parse_loc_elements(
response_text=response.text
)
return taxonomies
def company_directories(self, cik: str) -> dict:
"""Grabs all the filing directories for a company.
Overview:
----
Companies often file many SEC disclosures, so this endpoint
makes grabbing all the endpoints associated with a company
easy, by only requiring the CIK number.
Arguments:
----
cik {str} -- The company CIK number, defined by the SEC.
Returns:
----
dict -- A Dictionary containing the directory filings path.
Usage:
----
>>> edgar_client = EDGARQuery()
>>> company_filings = edgar_client.company_directories(cik='1265107')
[
{
'last-modified': '2019-07-02 12:27:42',
'name': '000000000019010655',
'size': '',
'type': 'folder.gif',
'url': 'https://www.sec.gov/Archives/edgar/data/1265107/000000000019010655/index.json'
},
{
'last-modified': '2019-07-01 17:17:26',
'name': '000110465919038688',
'size': '',
'type': 'folder.gif',
'url': 'https://www.sec.gov/Archives/edgar/data/1265107/000110465919038688/index.json'
}
]
"""
# Build the URL.
url = self.archive_service + "/data/{cik_number}/index.json".format(
cik_number=cik
)
cleaned_directories = []
directories = requests.get(url=url).json()
# Loop through each item.
for directory in directories['directory']['item']:
# Create the URL.
directory['url'] = self.archive_service + "/data/{cik_number}/{directory_id}/index.json".format(
cik_number=cik,
directory_id=directory['name']
)
directory['filing_id'] = directory.pop('name')
directory['last_modified'] = directory.pop('last-modified')
cleaned_directories.append(directory)
return cleaned_directories
def company_directory(self, cik: str, filing_id: str) -> dict:
"""Grabs all the items from a specific filing.
Overview:
----
The SEC organizes filings by CIK number which represent a single
entity. Each entity can have multiple filings, which is identified
by a filing ID. That filing can contain multiple items in it.
This endpoint will return all the items from a specific filing that
belongs to a single company.
Arguments:
----
cik {str} -- The company CIK number, defined by the SEC.
filing_id {str} -- The ID of filing to pull.
Returns:
----
dict -- A Dictionary containing the filing items.
Usage:
----
>>> edgar_client = EDGARQuery()
>>> company_filings = edgar_client.company_directory(cik='1265107', filing_id='000110465919038688')
[
{
'item_id': '0001104659-19-038688.txt',
'last_modified': '2019-07-01 17:17:26',
'size': '',
'type': 'text.gif',
'url': 'https://www.sec.gov/Archives/edgar/data/1265107/000110465919038688/0001104659-19-038688.txt'
},
{
'item_id': 'a19-12321_2425.htm',
'last_modified': '2019-07-01 17:17:26',
'size': '37553',
'type': 'text.gif',
'url': 'https://www.sec.gov/Archives/edgar/data/1265107/000110465919038688/a19-12321_2425.htm'
}
]
"""
url = self.archive_service + "/data/{cik_number}/{filing_number}/index.json".format(
cik_number=cik,
filing_number=filing_id
)
cleaned_items = []
directory = requests.get(url=url).json()
for item in directory['directory']['item']:
item['url'] = self.archive_service + "/data/{cik_number}/{directory_id}/{file_id}".format(
cik_number=cik,
directory_id=filing_id,
file_id=item['name']
)
item['item_id'] = item.pop('name')
item['last_modified'] = item.pop('last-modified')
cleaned_items.append(item)
return cleaned_items
    def company_filings_by_type(self, cik: str, filing_type: str) -> List[dict]:
        """Returns all the filings of certain type for a particular company.

        Arguments:
        ----
        cik {str} -- The company CIK Number.

        filing_type {str} -- The filing type ID (for example ``10-K``).

        Returns:
        ----
        List[dict] -- A list of parsed filing entries.

        Usage:
        ----
            >>> edgar_client = EDGARQuery()
            >>> company_filings = edgar_client.company_filings_by_type(
                cik='1265107',
                filing_type='10-K'
            )
        """
        # Set the params
        params = {
            'action': 'getcompany',
            'CIK': cik,
            'type': filing_type,
            'output': 'atom'
        }
        # Grab the response.
        response = requests.get(
            url=self.browse_service,
            params=params
        )
        # Parse the entries.
        entries = self.parser_client.parse_entries(entries_text=response.text)
        return entries
    def companies_by_state(self, state: str, num_of_companies: int = None) -> List[dict]:
        """Returns all the companies that fall under a given state.

        Arguments:
        ----
        state {str} -- The state code to filter on (presumably the two-letter
            abbreviation, e.g. ``TX`` — confirm against the EDGAR docs).

        Keyword Arguments:
        ----
        num_of_companies {int} -- If you would like to limit the number of results, then
            specify the number of companies you want back. (default: {None})

        Returns:
        ----
        List[dict] -- A list of Entry resources.
        """
        # define the arguments of the request
        search_sic_params = {
            'State': state,
            'Count': '100',
            'action': 'getcompany',
            'output': 'atom'
        }
        response = requests.get(
            url=self.browse_service,
            params=search_sic_params
        )
        # Parse the entries.
        entries = self.parser_client.parse_entries(
            entries_text=response.text,
            num_of_items=num_of_companies
        )
        return entries
def companies_by_country(self, country: str, num_of_companies: int = None) -> List[dict]:
"""Grabs all the companies that fall under a particular country code.
Arguments:
----
country {str} -- The country code.
Keyword Arguments:
----
num_of_companies {int} -- If you would like to limit the number of results, then
specify the number of companies you want back. (default: {None})
Returns:
----
List[dict] -- A list of Entry resources.
"""
# define the arguments of the request
search_sic_params = {
'Country': country,
'Count': '100',
'action': 'getcompany',
'output': 'atom'
}
# Grab the Response.
response = requests.get(
url=self.browse_service,
params=search_sic_params
)
# Parse the entries.
entries = self.parser_client.parse_entries(
entries_text=response.text,
num_of_items=num_of_companies
)
return entries
def companies_by_sic(self, sic_code: str, num_of_companies: int = None, start: int = None) -> List[dict]:
"""Grabs all companies with a certain SIC code.
Returns all companies, that fall under a particular SIC code. The information returned
by this endpoint depends on the infromation available on the company.
Arguments:
----
sic_code {str} -- The SIC code for a particular Industry.
Keyword Arguments:
----
num_of_companies {int} -- If you would like to limit the number of results, then
specify the number of companies you want back. (default: {None})
start {int} -- Specifies the starting company number. (default: {None})
Returns:
----
list[dict] -- A list of companies with the following attributes:
[
{
"state": "MN",
"cik": "0000066740",
"last-date": "",
"name": "<NAME>",
"sic-code": "3841",
"id": "urn:tag:www.sec.gov:cik=0000066740",
"href": "URL",
"type": "html",
"summary": "<strong>CIK:</strong> 0000066740, <strong>State:</strong> MN",
"title": "3M CO",
"updated": "2020-04-05T15:21:24-04:00",
"atom_owner_only": "URL",
"atom_owner_exclude": "URL",
"atom_owner_include": "URL",
"html_owner_only": "URL",
"html_owner_exclude": "URL",
"html_owner_include": "URL",
"atom_owner_only_filtered_date": "URL",
"atom_owner_exclude_filtered_date": "URL",
"atom_owner_include_filtered_date": "URL",
"html_owner_only_filtered_date": "URL",
"html_owner_exclude_filtered_date": "URL",
"html_owner_include_filtered_date": "URL",
}
]
"""
if not start:
start = 0
# define the arguments of the request
search_sic_params = {
'Count': '100',
'SIC': sic_code,
'Count': '100',
'action': 'getcompany',
'output': 'atom',
'start': start
}
# Make the response.
response = requests.get(
url=self.browse_service,
params=search_sic_params
)
# Parse the entries.
entries = self.parser_client.parse_entries(
entries_text=response.text,
num_of_items=num_of_companies,
start=start
)
return entries
    def ownership_filings_by_cik(self, cik: str, before: Union[str, date] = None, after: Union[str, date] = None) -> List[dict]:
        """Returns all the ownership filings for a given CIK number in a given date range.

        Arguments:
        ----
        cik {str} -- The CIK number of the company to be queried.

        Keyword Arguments:
        ----
        before {Union[str, date]} -- Represents filings that you want before a certain
            date. For example, "2019-12-01" means return all the filings BEFORE
            December 1, 2019. (default: {None})

        after {Union[str, date]} -- Represents filings that you want after a certain
            date. For example, "2019-12-01" means return all the filings AFTER
            December 1, 2019. (default: {None})

        Returns:
        ----
        List[dict] -- A list of ownership filings.
        """
        # define the arguments of the request
        # EDGAR naming: `datea` carries the lower bound (after) and `dateb`
        # the upper bound (before); `myowner=only` restricts to ownership filings.
        search_params = {
            'CIK': cik,
            'Count': '100',
            'myowner': 'only',
            'action': 'getcompany',
            'output': 'atom',
            'datea': after,
            'dateb': before
        }
        # Make the response.
        response = requests.get(
            url=self.browse_service,
            params=search_params
        )
        # Parse the entries.
        entries = self.parser_client.parse_entries(entries_text=response.text)
        return entries
def non_ownership_filings_by_cik(self, cik: str, before: str = None, after: str = None) -> List[dict]:
"""Returns all the non-ownership filings for a given CIK number in a given date range.
Arguments:
----
cik {str} -- The CIK number of the company to be queried.
Keyword Arguments:
----
before {Union[str, date]} -- Represents filings that you want before a | |
appropriately loaded!")
return self.__init_blank_net
    @abc.abstractmethod
    def remove_before_save(self) -> _TypeBuffer:
        """Detach and return any state that must not be pickled with the model.

        Called by :meth:`save` before serialising; the returned buffer is later
        handed back through :meth:`save_complete` / :meth:`reload_after_save`.
        """
        raise NotImplementedError("Abstract method!")
    @abc.abstractmethod
    def reload_after_save(self, data: _TypeBuffer, /) -> None:
        """Re-attach the buffer previously produced by :meth:`remove_before_save`."""
        raise NotImplementedError("Abstract method!")
# ----------------------------------------------------------------------------------------------
@final
def redraw_current_net(self) -> None:
if not isinstance(self.current_net, CurrentNetData):
raise KnownSimpleAnnError(f"SimpleNetCon is not in {CurrentNetData.__name__} mode")
self.__current_net = self._create_current_net()
@final
def merge_net_model(self, model: NetModelInterface, /) -> None:
if not isinstance(model, SimpleNetCon):
raise KnownSimpleAnnError(
f"Expected {SimpleNetCon.__name__} got {type(model).__name__}"
)
self.__current_net = deepcopy(model.current_net)
    @final
    def re_copy_current_net(self) -> None:
        """Snapshot the live net into the best-net buffer and the blank template.

        Each target receives its own independent deep copy.

        Raises:
            KnownSimpleAnnError: if the controller holds no live ``CurrentNetData``.
        """
        if not isinstance(self.current_net, CurrentNetData):
            raise KnownSimpleAnnError(f"SimpleNetCon is not in {CurrentNetData.__name__} mode")
        self.__buffered_best_net = deepcopy(self.current_net)
        self.__init_blank_net = deepcopy(self.current_net)
    @final
    def re_init_current_net(self, new_net: CurrentNetData, /) -> None:
        """Replace current, best and blank nets with copies of ``new_net``.

        Three separate deep copies are made so the slots share no state.

        Raises:
            KnownSimpleAnnError: if the controller holds no live ``CurrentNetData``.
        """
        if not isinstance(self.current_net, CurrentNetData):
            raise KnownSimpleAnnError(f"SimpleNetCon is not in {CurrentNetData.__name__} mode")
        self.__current_net = deepcopy(new_net)
        self.__buffered_best_net = deepcopy(new_net)
        self.__init_blank_net = deepcopy(new_net)
@final
def update_current_net(self, fitness: float, /) -> None:
if not isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
old_fitness = self.buffered_best_net.fitness
self.__current_net.fitness = fitness
if fitness <= old_fitness:
self.__buffered_best_net = deepcopy(self.__current_net)
    @final
    def reset_current_net(self) -> None:
        """Restore the current net from the initial blank template (deep copy).

        Raises:
            KnownSimpleAnnError: if the controller holds no live ``CurrentNetData``.
        """
        if not isinstance(self.__current_net, CurrentNetData):
            raise KnownSimpleAnnError("The net was not appropriately loaded!")
        self.__current_net = deepcopy(self.init_blank_net)
    @final
    def set_best_net(self) -> None:
        """Restore the current net from the buffered best net (deep copy).

        Raises:
            KnownSimpleAnnError: if the controller holds no live ``CurrentNetData``.
        """
        if not isinstance(self.__current_net, CurrentNetData):
            raise KnownSimpleAnnError("The net was not appropriately loaded!")
        self.__current_net = deepcopy(self.buffered_best_net)
    @final
    @property
    def get_net_com(self) -> nn.Module:
        """The ``com`` sub-module of the live current net.

        Raises:
            KnownSimpleAnnError: if the controller holds no live ``CurrentNetData``.
        """
        if not isinstance(self.__current_net, CurrentNetData):
            raise KnownSimpleAnnError("The net was not appropriately loaded!")
        return self.__current_net.com
    @final
    @property
    def get_net_lego(self) -> nn.Module:
        """The ``lego`` sub-module of the live current net.

        Raises:
            KnownSimpleAnnError: if the controller holds no live ``CurrentNetData``.
        """
        if not isinstance(self.__current_net, CurrentNetData):
            raise KnownSimpleAnnError("The net was not appropriately loaded!")
        return self.__current_net.lego
    @final
    def save(self) -> Tuple[
        bytes, Tuple[CurrentNetData, CurrentNetData, CurrentNetData], _TypeBuffer
    ]:
        """Serialise this controller, temporarily stripping unpicklable state.

        The live nets are swapped out for a plain ``(fitness, com state_dict,
        lego state_dict)`` tuple before pickling; the detached ``CurrentNetData``
        objects and the abstract buffer are returned alongside the bytes so
        :meth:`save_complete` can restore them afterwards.

        Returns:
            Triple of (pickled controller bytes, the detached
            (current, best, blank) nets, the buffer from
            :meth:`remove_before_save`).

        Raises:
            KnownSimpleAnnError: if the controller holds no live ``CurrentNetData``.
        """
        cr_net = self.current_net
        if not isinstance(cr_net, CurrentNetData):
            raise KnownSimpleAnnError("The net was not appropriately loaded!")
        buf_net = self.buffered_best_net
        # replace the live net with a picklable snapshot of the *best* net
        self.__current_net = (buf_net.fitness, buf_net.com.state_dict(), buf_net.lego.state_dict())
        init_net = self.init_blank_net
        self.__buffered_best_net = None
        self.__init_blank_net = None
        rem_buf = self.remove_before_save()
        erg = (
            rick.dumps(self, protocol=rick.HIGHEST_PROTOCOL),
            (cr_net, buf_net, init_net), rem_buf
        )
        return erg
@final
def save_complete(self, saved_net: Tuple[CurrentNetData, ...],
saved_buf: _TypeBuffer, /) -> None:
if isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately saved!")
if len(saved_net) != 3:
raise KnownSimpleAnnError(f"Expected saved_net tuple length 3 got {len(saved_net)}!")
for elem in saved_net:
if not isinstance(elem, CurrentNetData):
raise KnownSimpleAnnError(f"Expected CurrentNetData got {type(elem).__name__}!")
self.__current_net = saved_net[0]
self.__buffered_best_net = saved_net[1]
self.__init_blank_net = saved_net[2]
self.reload_after_save(saved_buf)
    @final
    def load_tuple_dict_stats(self, data: Tuple[float, Dict, Dict],
                              extra_args: InitContainer, /) -> None:
        """Rebuild the nets from a ``(fitness, com state_dict, lego state_dict)`` tuple.

        Both sub-modules are switched to eval mode after loading, and the
        restored net is deep-copied into the best-net buffer and the blank
        template.

        Args:
            data: fitness value plus the two ``state_dict`` mappings.
            extra_args: construction parameters for ``_create_current_loaded_net``.
        """
        self.__current_net = self._create_current_loaded_net(extra_args)
        self.__current_net.fitness = data[0]
        self.__current_net.com.load_state_dict(data[1])
        self.__current_net.com.eval()
        self.__current_net.lego.load_state_dict(data[2])
        self.__current_net.lego.eval()
        self.__buffered_best_net = deepcopy(self.__current_net)
        self.__init_blank_net = deepcopy(self.__current_net)
    @classmethod
    @final
    def load(cls, data: bytes, extra_args: InitContainer, /) -> 'SimpleNetCon':
        """Deserialise a controller produced by :meth:`save` and restore its nets.

        Args:
            data: pickled ``SimpleNetCon`` bytes holding the snapshot tuple.
            extra_args: construction parameters for rebuilding the net modules.

        Returns:
            The restored ``SimpleNetCon`` instance.

        Raises:
            KnownSimpleAnnError: if ``extra_args`` or the unpickled payload does
                not have the expected type or shape.
        """
        if not isinstance(extra_args, InitContainer):
            raise KnownSimpleAnnError(
                f"Expected args to be {InitContainer.__name__} got {type(extra_args).__name__}!"
            )
        loaded_net = rick.loads(data)
        if not isinstance(loaded_net, SimpleNetCon):
            raise KnownSimpleAnnError(
                f"Expected bytes to be {SimpleNetCon.__name__} got {type(loaded_net).__name__}!"
            )
        loaded_tuple = loaded_net.current_net
        if not isinstance(loaded_tuple, tuple):
            raise KnownSimpleAnnError(
                f"Expected tuple got {type(loaded_tuple).__name__}!"
            )
        if len(loaded_tuple) != 3:
            raise KnownSimpleAnnError(
                f"Expected tuple to have 3 elements got {len(loaded_tuple)}!"
            )
        if not (isinstance(loaded_tuple[0], float)
                and isinstance(loaded_tuple[1], dict)
                and isinstance(loaded_tuple[2], dict)):
            raise KnownSimpleAnnError("Received wrong typed tuple!")
        # re-normalise the payload into fresh containers before rebuilding
        casted_tuple = (
            float(loaded_tuple[0]),
            {**loaded_tuple[1]},
            {**loaded_tuple[2]}
        )
        loaded_net.load_tuple_dict_stats(casted_tuple, extra_args)
        return loaded_net
@final
@dataclass
class _SimpleANNCon:
    """Internal mutable state container backing ``SimpleAnnNet``."""
    # held-out datasets; ``None`` until provided
    test_data: Optional[Tuple[Dataset, ...]] = None
    train_data: Optional[Tuple[Dataset, ...]] = None
    eval_data: Optional[Tuple[Dataset, ...]] = None
    # optional path of a "stop" marker file checked during optimisation
    stop_op_fp: Optional[Path] = None
    # (retrain, random_net) flags — see SimpleAnnNet.retrain / random_net
    is_trainable: Tuple[bool, bool] = (True, False)
def _unlink_if_exists(file_p: Path, /) -> None:
if file_p.exists() and file_p.is_file():
file_p.unlink()
@final
class DataSetTypes(Enum):
    """Labels for the three dataset roles handled by ``SimpleAnnNet``."""
    TRAIN = 'TrainData'
    TEST = 'TestData'
    EVAL = 'EvalData'
def _move_data_to_shared_mem(data_t: Optional[Tuple[Dataset, ...]],
smm: SharedMemoryManager, /) -> None:
if data_t is not None:
for data in data_t:
if isinstance(data, DataSetSharedMemoryA):
data.move_data_to_shared_memory(smm)
class SimpleAnnNet(
NodeANNDataElemInterface[nn.Module, CurrentNetData, _TypeBuffer, InitContainer],
abc.ABC
):
    def __init__(self, args: InitNetArgs, /) -> None:
        """Store the configuration bundle and create empty runtime state.

        Args:
            args: initialisation/configuration arguments for the net.
        """
        super().__init__()
        self.__arguments_con = args
        self.__data_container = _SimpleANNCon()
        # NOTE(review): savable and net module start empty — presumably
        # populated by code outside this view; confirm.
        self.__savable: Optional[
            NetSavable[nn.Module, CurrentNetData, _TypeBuffer, InitContainer]
        ] = None
        self.__net_module: Optional[SimpleNetCon] = None
        self.__data_name = "NotSet"
    @final
    def get_node_name(self) -> str:
        """Return the node's name (``"NotSet"`` until assigned)."""
        return self.__data_name
    @final
    def set_node_name(self, name: str) -> None:
        """Assign the node's name."""
        self.__data_name = name
@final
def _move_data_sets_to_shared_memory(self, smm: Optional[SharedMemoryManager], /) -> None:
if smm is not None:
_move_data_to_shared_mem(self.__data_container.train_data, smm)
_move_data_to_shared_mem(self.__data_container.eval_data, smm)
    @abc.abstractmethod
    def re_read_data(self, data_type: DataSetTypes, /) -> Optional[Tuple[Dataset, ...]]:
        """Re-load the given data-set kind; return ``None`` to keep the cached one.

        Called by the ``test_data``/``train_data``/``eval_data`` properties,
        which replace their cached tuple with any non-``None`` result.
        """
        raise NotImplementedError("Abstract method!")
    @abc.abstractmethod
    def check_net_state(self) -> NetGeneralState:
        """Return the general state of the net (subclass-defined)."""
        raise NotImplementedError("Abstract method!")
    @abc.abstractmethod
    def check_init_state(self) -> InitState:
        """Return the initialisation state of the net (subclass-defined)."""
        raise NotImplementedError("Abstract method!")
    @abc.abstractmethod
    def get_truth_fun_id(self) -> str:
        """Return the id of the truth function forwarded to the trainer arguments."""
        raise NotImplementedError("Abstract method!")
    @final
    def stop_file_it_min(self, it_cnt: int, runt_time_min: int, /) -> bool:
        """Decide whether the hyper-parameter search should keep running.

        Returns ``True`` while all three hold:

        * the iteration budget is not exhausted (a falsy ``stop_iterations``
          disables that budget),
        * the stop file is unset, or still present on disk (deleting it is the
          external signal to stop), and
        * the runtime budget in minutes is not exhausted (a falsy
          ``stop_time_min`` disables it).

        NOTE(review): assumes ``arguments_con.hyper_optim_wr`` is not ``None``;
        the callers visible here check that before use -- confirm before
        calling from new code.
        """
        return (
            it_cnt < self.arguments_con.hyper_optim_wr.stop_iterations
            or not self.arguments_con.hyper_optim_wr.stop_iterations
        ) and (
            self.stop_file is None
            or (self.stop_file.exists() and self.stop_file.is_file())
        ) and (
            runt_time_min < self.arguments_con.hyper_optim_wr.stop_time_min
            or not self.arguments_con.hyper_optim_wr.stop_time_min
        )
    @final
    @property
    def stop_file(self) -> Optional[Path]:
        """Path of the registered stop lock file, or ``None`` when none is set."""
        return self.__data_container.stop_op_fp
    @final
    def stop_file_set(self, file_p: Optional[Path], /) -> None:
        """Register ``file_p`` as the stop file.

        Silently ignored unless ``file_p`` is an existing regular file.
        """
        if file_p is not None and file_p.exists() and file_p.is_file():
            self.__data_container.stop_op_fp = file_p
    @final
    @property
    def arguments_con(self) -> InitNetArgs:
        """The initialisation argument bundle handed to ``__init__``."""
        return self.__arguments_con
    @final
    def is_trainable(self) -> bool:
        """True when the net should be trained: retrain is on and the net is not random."""
        return self.retrain and not self.random_net
    @final
    @property
    def retrain(self) -> bool:
        """First element of the (retrain, random_net) flag pair."""
        return self.__data_container.is_trainable[0]
    @final
    def retrain_set(self, retrain: bool, /) -> None:
        """Set the retrain flag, leaving the random_net flag unchanged."""
        self.__data_container.is_trainable = (retrain, self.__data_container.is_trainable[1])
    @final
    @property
    def random_net(self) -> bool:
        """Second element of the (retrain, random_net) flag pair."""
        return self.__data_container.is_trainable[1]
    @final
    def random_net_set(self, random_net: bool, /) -> None:
        """Set the random_net flag, leaving the retrain flag unchanged."""
        self.__data_container.is_trainable = (self.__data_container.is_trainable[0], random_net)
@final
@property
def test_data(self) -> Tuple[Dataset, ...]:
if self.__data_container.test_data is None:
return ()
temp_data = self.re_read_data(DataSetTypes.TEST)
if temp_data is not None:
self.test_data_set(temp_data)
return self.__data_container.test_data
@final
def test_data_set(self, data: Tuple[Dataset, ...], /) -> None:
if not (isinstance(data, tuple) and data):
raise KnownSimpleAnnError("The given test data set was empty")
self.__data_container.test_data = data
@final
@property
def train_data(self) -> Tuple[Dataset, ...]:
if self.__data_container.train_data is None:
return ()
temp_data = self.re_read_data(DataSetTypes.TRAIN)
if temp_data is not None:
self.train_data_set(temp_data)
return self.__data_container.train_data
@final
def train_data_set(self, data: Tuple[Dataset, ...], /) -> None:
if not (isinstance(data, tuple) and data):
raise KnownSimpleAnnError("The given train data set was empty")
self.__data_container.train_data = data
@final
@property
def eval_data(self) -> Tuple[Dataset, ...]:
if self.__data_container.eval_data is None:
return ()
temp_data = self.re_read_data(DataSetTypes.EVAL)
if temp_data is not None:
self.eval_data_set(temp_data)
return self.__data_container.eval_data
@final
def eval_data_set(self, data: Tuple[Dataset, ...], /) -> None:
if not (isinstance(data, tuple) and data):
raise KnownSimpleAnnError("The given eval data set was empty")
self.__data_container.eval_data = data
    @final
    @property
    def savable(self) -> \
            Optional[NetSavable[nn.Module, CurrentNetData, _TypeBuffer, InitContainer]]:
        """The savable container, or ``None`` before :meth:`savable_set` was called."""
        return self.__savable
    @final
    def savable_set(self, savable: NetSavable[
        nn.Module, CurrentNetData, _TypeBuffer, InitContainer
    ], /) -> None:
        """Install the savable container (overwrites any previous one)."""
        self.__savable = savable
    @final
    def get_savable_data(self) -> NetSavable[nn.Module, CurrentNetData, _TypeBuffer, InitContainer]:
        """Return the savable container.

        :raises KnownSimpleAnnError: if :meth:`savable_set` was never called.
        """
        if self.__savable is None:
            raise KnownSimpleAnnError("Net was not initialised!")
        return self.__savable
    @final
    @property
    def net_module(self) -> Optional[SimpleNetCon]:
        """The wrapped net container, or ``None`` before :meth:`net_module_set`."""
        return self.__net_module
    @final
    def net_module_set(self, module: SimpleNetCon, /) -> None:
        """Install the net container; may only happen once.

        :raises KnownSimpleAnnError: if a module was already installed.
        """
        if self.__net_module is not None:
            raise KnownSimpleAnnError("Net was already initialised!")
        self.__net_module = module
    @final
    def get_savable_net(self) -> SimpleNetCon:
        """Return the installed net container.

        :raises KnownSimpleAnnError: if no module was installed yet.
        """
        if self.__net_module is None:
            raise KnownSimpleAnnError("Net was not initialised!")
        return self.__net_module
    @final
    def _update_hyper_run(self, hyper_cont: HyperOptimInterfaceArgs,
                          new_params: Dict[str, HyperOptimReturnElem], /) -> None:
        """Reset the current net, then apply ``new_params`` (see :meth:`_update_hyper`)."""
        self.get_savable_net().reset_current_net()
        self._update_hyper(hyper_cont, new_params)
    @final
    def _update_hyper(self, hyper_cont: HyperOptimInterfaceArgs,
                      new_params: Dict[str, HyperOptimReturnElem], /) -> None:
        """Push ``new_params`` into the net and arguments, then refresh ``hyper_cont``."""
        update_hyper_params(self.get_savable_net(), self.arguments_con, new_params)
        update_hyper_container(self.arguments_con, hyper_cont)
@final
def _create_train_interface(self, id_file: ANNTreeIdType,
copy: bool, id_mod: str, /) -> TrainerInterfaceArgs:
if self.arguments_con.net_state.get_kwargs().redraw:
self.get_savable_net().redraw_current_net()
if copy:
buf = self.get_savable_net().remove_before_save()
new_mod = deepcopy(self.get_savable_net())
self.get_savable_net().reload_after_save(buf)
else:
new_mod = self.get_savable_net()
new_train_args = TrainerInterfaceArgs(
module=new_mod,
input_train=self.train_data,
input_eval=self.eval_data,
id_file=deepcopy(id_file),
dump=self.arguments_con.net_state.get_kwargs().dump,
cuda=self.arguments_con.net_state.get_kwargs().cuda,
optimizer=deepcopy(self.arguments_con.optimizer_wr)
if copy else self.arguments_con.optimizer_wr,
scheduler=deepcopy(self.arguments_con.scheduler_wr)
if copy else self.arguments_con.scheduler_wr,
criterion=deepcopy(self.arguments_con.criterion_wr)
if copy else self.arguments_con.criterion_wr,
truth_fun_id=self.get_truth_fun_id(),
hyper_str=create_hyper_param_str(self.get_node_name(), self.arguments_con)
)
if id_mod:
new_train_args.id_file.add_modifier(id_mod)
return new_train_args
@final
def _create_stop_file(self, id_file: ANNTreeIdType, /) -> Optional[Path]:
if self.arguments_con.hyper_optim_wr is not None \
and self.arguments_con.hyper_optim_wr.stop_file is not None \
and self.arguments_con.hyper_optim_wr.stop_file.exists() \
and self.arguments_con.hyper_optim_wr.stop_file.is_dir():
merged_str = \
f"{id_file.id_merged_str}_{datetime.now().strftime('%d_%m_%Y__%H_%M_%S')}.lock"
stop_file = self.arguments_con.hyper_optim_wr.stop_file.joinpath(merged_str)
stop_file.touch()
atexit.register(_unlink_if_exists, stop_file)
return stop_file
return None
    def _get_new_params(self, generator_optim: HGenTA, fixed_params: _TrFitParam,
                        run_cont: _RunningConst, /) -> List[Dict[str, HyperOptimReturnElem]]:
        """Request the next batch of hyper-parameter sets from the optimiser.

        Sends ``fixed_params`` (fitness of the previous batch) into the
        generator, retrying up to 10 times while it yields an empty batch.
        ``run_cont.running`` is cleared when the generator is exhausted, when
        the stop criteria trip, or when no parameters were produced.
        """
        run_cnt = 0
        l_new_params: List[Dict[str, HyperOptimReturnElem]] = []
        while run_cnt < 10 and not l_new_params:
            run_cnt += 1
            try:
                l_new_params = generator_optim.send(fixed_params)
            except StopIteration:
                # Optimiser finished: stop the outer search and exit the retry loop.
                run_cont.running = False
                run_cnt = 10
            else:
                run_cont.running = self.stop_file_it_min(run_cont.run_id, run_cont.run_time_min)
        if not l_new_params:
            run_cont.running = False
        return l_new_params
def _train_single(self, sync_out: SyncStdoutInterface, run_cont: _RunningConst,
hyper_cont: HyperOptimInterfaceArgs,
id_file: ANNTreeIdType, /) -> Iterable[TrainNNStatsElementType]:
if self.arguments_con.hyper_optim_wr is None:
raise KnownSimpleAnnError("Hyper-optimiser is not defined!")
generator_optim = self.arguments_con.hyper_optim_wr.hyper.hyper_optim(
sync_out, hyper_cont
)
try:
l_new_params: List[Dict[str, HyperOptimReturnElem]] = next(generator_optim)
except StopIteration:
raise KnownSimpleAnnError("Generator could not be started!")
while run_cont.running:
tr_fit: _TrFitAl = ([], [])
trainer_args = []
for param_id, new_param in enumerate(l_new_params):
run_cont.hyper_cont_buffer = deepcopy(hyper_cont)
self.arguments_con.prepare_wr.init_prepare()
self._update_hyper_run(run_cont.hyper_cont_buffer, new_param)
yield from self.arguments_con.prepare_wr.prepare.run_train(
sync_out, PrepareInterfaceArgs(
trainer=deepcopy(self.arguments_con.trainer_wr.trainer),
trainer_args=self._create_train_interface(
id_file, False, str(run_cont.run_id + param_id)
)
)
)
re_copy_model(
self.arguments_con.prepare_wr.prepare.p_state_dict,
self.get_savable_net().get_net_com
)
tr_fit_res = self.arguments_con.prepare_wr.prepare.fitness
tr_fit[0].append((tr_fit_res[0], _create_hyper_params(run_cont.hyper_cont_buffer)))
tr_fit[1].append(tr_fit_res[1])
trainer_args.append(run_cont.hyper_cont_buffer)
self.get_savable_net().update_current_net(tr_fit_res[0])
run_cont.fit_plotter.update_fitness(tr_fit, trainer_args)
self._update_hyper(hyper_cont, run_cont.fit_plotter.bets_fit_h_param[1])
| |
if is_zero(Hvec*Vvec + Hconst):
incidence_matrix[Vindex, Hindex] = 1
# A ray or line is considered incident with a hyperplane,
# if it is orthogonal to the normal vector of the hyperplane.
for Vvec, Vindex in Vvectors_rays_lines:
if is_zero(Hvec*Vvec):
incidence_matrix[Vindex, Hindex] = 1
incidence_matrix.set_immutable()
return incidence_matrix
    @cached_method
    def slack_matrix(self):
        r"""
        Return the slack matrix.
        The entries correspond to the evaluation of the Hrepresentation
        elements on the Vrepresentation elements.
        .. NOTE::
            The columns correspond to inequalities/equations in the
            order :meth:`Hrepresentation`, the rows correspond to
            vertices/rays/lines in the order
            :meth:`Vrepresentation`.
        .. SEEALSO::
            :meth:`incidence_matrix`.
        EXAMPLES::
            sage: P = polytopes.cube()
            sage: P.slack_matrix()
            [0 2 2 2 0 0]
            [0 0 2 2 0 2]
            [0 0 0 2 2 2]
            [0 2 0 2 2 0]
            [2 2 0 0 2 0]
            [2 2 2 0 0 0]
            [2 0 2 0 0 2]
            [2 0 0 0 2 2]
            sage: P = polytopes.cube(intervals='zero_one')
            sage: P.slack_matrix()
            [0 1 1 1 0 0]
            [0 0 1 1 0 1]
            [0 0 0 1 1 1]
            [0 1 0 1 1 0]
            [1 1 0 0 1 0]
            [1 1 1 0 0 0]
            [1 0 1 0 0 1]
            [1 0 0 0 1 1]
            sage: P = polytopes.dodecahedron().faces(2)[0].as_polyhedron()
            sage: P.slack_matrix()
            [1/2*sqrt5 - 1/2 0 0 1 1/2*sqrt5 - 1/2 0]
            [ 0 0 1/2*sqrt5 - 1/2 1/2*sqrt5 - 1/2 1 0]
            [ 0 1/2*sqrt5 - 1/2 1 0 1/2*sqrt5 - 1/2 0]
            [ 1 1/2*sqrt5 - 1/2 0 1/2*sqrt5 - 1/2 0 0]
            [1/2*sqrt5 - 1/2 1 1/2*sqrt5 - 1/2 0 0 0]
            sage: P = Polyhedron(rays=[[1, 0], [0, 1]])
            sage: P.slack_matrix()
            [0 0]
            [0 1]
            [1 0]
        TESTS::
            sage: Polyhedron().slack_matrix()
            []
            sage: Polyhedron(base_ring=QuadraticField(2)).slack_matrix().base_ring()
            Number Field in a with defining polynomial x^2 - 2 with a = 1.41...
        """
        if not self.n_Vrepresentation() or not self.n_Hrepresentation():
            # Degenerate case: no V- or H-elements; return a zero matrix of the
            # right (possibly empty) dimensions over the base ring.
            slack_matrix = matrix(self.base_ring(), self.n_Vrepresentation(),
                                  self.n_Hrepresentation(), 0)
        else:
            Vrep_matrix = matrix(self.base_ring(), self.Vrepresentation())
            Hrep_matrix = matrix(self.base_ring(), self.Hrepresentation())
            # Getting homogeneous coordinates of the Vrepresentation.
            # Vertices get homogeneous coordinate 1; rays and lines get 0.
            hom_helper = matrix(self.base_ring(), [1 if v.is_vertex() else 0 for v in self.Vrepresentation()])
            hom_Vrep = hom_helper.stack(Vrep_matrix.transpose())
            # Entry (v, h) is the evaluation of H-element h on V-element v.
            slack_matrix = (Hrep_matrix * hom_Vrep).transpose()
        # Immutable so the cached matrix cannot be mutated by callers.
        slack_matrix.set_immutable()
        return slack_matrix
    def base_ring(self):
        """
        Return the base ring.
        OUTPUT:
        The ring over which the polyhedron is defined. Must be a
        sub-ring of the reals to define a polyhedron, in particular
        comparison must be defined. Popular choices are
        * ``ZZ`` (the ring of integers, lattice polytope),
        * ``QQ`` (exact arithmetic using gmp),
        * ``RDF`` (double precision floating-point arithmetic), or
        * ``AA`` (real algebraic field).
        EXAMPLES::
            sage: triangle = Polyhedron(vertices = [[1,0],[0,1],[1,1]])
            sage: triangle.base_ring() == ZZ
            True
        """
        # The base ring is stored on the parent (the Polyhedra category object).
        return self.parent().base_ring()
    def backend(self):
        """
        Return the backend used.
        OUTPUT:
        The name of the backend used for computations. It will be one of
        the following backends:
        * ``ppl`` the Parma Polyhedra Library
        * ``cdd`` CDD
        * ``normaliz`` normaliz
        * ``polymake`` polymake
        * ``field`` a generic Sage implementation
        EXAMPLES::
            sage: triangle = Polyhedron(vertices = [[1, 0], [0, 1], [1, 1]])
            sage: triangle.backend()
            'ppl'
            sage: D = polytopes.dodecahedron()
            sage: D.backend()
            'field'
            sage: P = Polyhedron([[1.23]])
            sage: P.backend()
            'cdd'
        """
        # Like base_ring(), the backend choice is a property of the parent.
        return self.parent().backend()
    @cached_method
    def center(self):
        """
        Return the average of the vertices.
        .. SEEALSO::
            :meth:`representative_point`.
        OUTPUT:
        The center of the polyhedron. All rays and lines are
        ignored. Raises a ``ZeroDivisionError`` for the empty
        polytope.
        EXAMPLES::
            sage: p = polytopes.hypercube(3)
            sage: p = p + vector([1,0,0])
            sage: p.center()
            (1, 0, 0)
        """
        if self.dim() == 0:
            # A single point is its own center.
            return self.vertices()[0].vector()
        else:
            # Average of the vertex vectors; n_vertices() == 0 gives the
            # documented ZeroDivisionError for the empty polytope.
            vertex_sum = vector(self.base_ring(), [0]*self.ambient_dim())
            for v in self.vertex_generator():
                vertex_sum += v.vector()
            vertex_sum.set_immutable()
            return vertex_sum / self.n_vertices()
    @cached_method(do_pickle=True)
    def centroid(self, engine='auto', **kwds):
        r"""
        Return the center of the mass of the polytope.
        The mass is taken with respect to the induced Lebesgue measure,
        see :meth:`volume`.
        If the polyhedron is not compact, a ``NotImplementedError`` is
        raised.
        INPUT:
        - ``engine`` -- either 'auto' (default), 'internal',
          'TOPCOM', or 'normaliz'. The 'internal' and 'TOPCOM' instruct
          this package to always use its own triangulation algorithms
          or TOPCOM's algorithms, respectively. By default ('auto'),
          TOPCOM is used if it is available and internal routines otherwise.
        - ``**kwds`` -- keyword arguments that are passed to the
          triangulation engine (see :meth:`triangulate`).
        OUTPUT: The centroid as vector.
        ALGORITHM:
        We triangulate the polytope and find the barycenter of the simplices.
        We add the individual barycenters weighted by the fraction of the total
        mass.
        EXAMPLES::
            sage: P = polytopes.hypercube(2).pyramid()
            sage: P.centroid()
            (1/4, 0, 0)
            sage: P = polytopes.associahedron(['A',2])
            sage: P.centroid()
            (2/21, 2/21)
            sage: P = polytopes.permutahedron(4, backend='normaliz')  # optional - pynormaliz
            sage: P.centroid()                                        # optional - pynormaliz
            (5/2, 5/2, 5/2, 5/2)
        The method is not implemented for unbounded polyhedra::
            sage: P = Polyhedron(vertices=[(0,0)],rays=[(1,0),(0,1)])
            sage: P.centroid()
            Traceback (most recent call last):
            ...
            NotImplementedError: the polyhedron is not compact
        The centroid of an empty polyhedron is not defined::
            sage: Polyhedron().centroid()
            Traceback (most recent call last):
            ...
            ZeroDivisionError: rational division by zero
        TESTS::
            sage: Polyhedron(vertices=[[0,1]]).centroid()
            (0, 1)
        """
        if not self.is_compact():
            raise NotImplementedError("the polyhedron is not compact")
        if self.n_vertices() == self.dim() + 1:
            # The centroid of a simplex is its center.
            return self.center()
        triangulation = self.triangulate(engine=engine, **kwds)
        if self.ambient_dim() == self.dim():
            pc = triangulation.point_configuration()
        else:
            # Not full-dimensional: project onto the affine hull so the
            # simplex volumes below are taken w.r.t. the induced measure.
            from sage.geometry.triangulation.point_configuration import PointConfiguration
            A, b = self.affine_hull_projection(as_affine_map=True, orthogonal=True, orthonormal=True, extend=True)
            pc = PointConfiguration((A(v.vector()) for v in self.Vrep_generator()))
        # Barycenter of each simplex, weighted by its share of the total volume.
        barycenters = [sum(self.Vrepresentation(i).vector() for i in simplex)/(self.dim() + 1) for simplex in triangulation]
        volumes = [pc.volume(simplex) for simplex in triangulation]
        centroid = sum(volumes[i]*barycenters[i] for i in range(len(volumes)))/sum(volumes)
        if self.ambient_dim() != self.dim():
            # By the affine hull projection, the centroid has base ring ``AA``,
            # we try return the centroid in a reasonable ring.
            try:
                return centroid.change_ring(self.base_ring().fraction_field())
            except ValueError:
                pass
        return centroid
    @cached_method
    def representative_point(self):
        """
        Return a "generic" point.
        .. SEEALSO::
            :meth:`center`.
        OUTPUT:
        A point as a coordinate vector. The point is chosen to be
        interior as far as possible. If the polyhedron is not
        full-dimensional, the point is in the relative interior. If
        the polyhedron is zero-dimensional, its single point is
        returned.
        EXAMPLES::
            sage: p = Polyhedron(vertices=[(3,2)], rays=[(1,-1)])
            sage: p.representative_point()
            (4, 1)
            sage: p.center()
            (3, 2)
            sage: Polyhedron(vertices=[(3,2)]).representative_point()
            (3, 2)
        """
        # Start from the vertex average (the center) ...
        accumulator = vector(self.base_ring(), [0]*self.ambient_dim())
        for v in self.vertex_generator():
            accumulator += v.vector()
        accumulator /= self.n_vertices()
        # ... then add one step along every ray, pushing the point towards
        # the (relative) interior of an unbounded polyhedron.
        for r in self.ray_generator():
            accumulator += r.vector()
        accumulator.set_immutable()
        return accumulator
    def a_maximal_chain(self):
        r"""
        Return a maximal chain of the face lattice in increasing order.
        EXAMPLES::
            sage: P = polytopes.cube()
            sage: P.a_maximal_chain()
            [A -1-dimensional face of a Polyhedron in ZZ^3,
             A 0-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 1 vertex,
             A 1-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 2 vertices,
             A 2-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 4 vertices,
             A 3-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 8 vertices]
            sage: P = polytopes.cube()
            sage: chain = P.a_maximal_chain(); chain
            [A -1-dimensional face of a Polyhedron in ZZ^3,
             A 0-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 1 vertex,
             A 1-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 2 vertices,
             A 2-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 4 vertices,
             A 3-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 8 vertices]
            sage: [face.ambient_V_indices() for face in chain]
            [(), (5,), (0, 5), (0, 3, 4, 5), (0, 1, 2, 3, 4, 5, 6, 7)]
        TESTS::
        Check output for the empty polyhedron::
            sage: P = Polyhedron()
            sage: P.a_maximal_chain()
            [A -1-dimensional face of a Polyhedron in ZZ^0]
        """
        comb_chain = self.combinatorial_polyhedron().a_maximal_chain()
        from sage.geometry.polyhedron.face import combinatorial_face_to_polyhedral_face
        empty_face = self.faces(-1)[0]
        universe = self.faces(self.dim())[0]
        if self.dim() == -1:
            # Empty polyhedron: the chain consists of the empty face only.
            return [empty_face]
        # The combinatorial chain omits the empty face and the polyhedron
        # itself, so prepend/append them here.
        return [empty_face] + \
            [combinatorial_face_to_polyhedral_face(self, face)
             for face in comb_chain] + \
            [universe]
@cached_method
def radius_square(self):
"""
Return the square of the maximal distance from the
:meth:`center` to a vertex. All rays and lines are ignored.
OUTPUT:
The square of the radius, which is |
End of preview. Expand
in Dataset Viewer.
README.md exists but content is empty.
Use the Edit dataset card button to edit it.
- Downloads last month
- 52