content (string, 7-928k) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104) | path (string, 4-230) | size (int64, 7-928k) | lang (1 class) |
---|---|---|---|---|---|---|---|---|
"""
Django settings for Punyadaan_Website project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-=7!#uo@n6l4aboay(tg_!b=mb#r=6=q)xq8@x!(z3jb2!95h_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
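# Illustrative hardening sketch (not part of the original project): in production these values
# are usually supplied via environment variables rather than hard-coded, e.g.
# SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
# DEBUG = os.environ.get('DJANGO_DEBUG', '') == 'True'
# ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',')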
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Punyadaan_Website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Punyadaan_Website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
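# Illustrative companion setting (not in the original file): a target directory for `collectstatic`
# when deploying, assuming the default staticfiles app shipped with Django.
# STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')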
| 25.793388 | 91 | 0.697853 | ["Apache-2.0"] | dushyant1singh1/Punyadaan-Website | Punyadaan_Website/Punyadaan_Website/settings.py | 3,121 | Python |
# -*- coding: utf-8 -*-
# Created by: ZhaoDongshuang
# Created on: 2018/1/27
import unittest
from others.survey import AnonymousSurvey
class TestAnonymousSurvey(unittest.TestCase):
def setUp(self):
question = "What language did you first learn to speak?"
self.my_survey = AnonymousSurvey(question)
self.responses = ['English', 'Spanish', 'Mandarin']
def test_store_single_response(self):
self.my_survey.store_response(self.responses[0])
self.assertIn(self.responses[0], self.my_survey.responses)
def test_store_three_responses(self):
for response in self.responses:
self.my_survey.store_response(response)
for response in self.responses:
self.assertIn(response, self.my_survey.responses)
if __name__ == '__main__':
unittest.main()
| 28.724138 | 66 | 0.696279 | ["Apache-2.0"] | imtoby/LearnPythonRecord | others/test_survey.py | 833 | Python |
import unittest, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i
# test some random csv data, and some lineend combinations
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
# believe the interesting thing is the NaN in the csv
def test_A_parse3(self):
parseResult = h2i.import_parse(bucket='smalldata', path='parse3.csv', schema='put')
h2o_cmd.runRF(parseResult=parseResult, trees=37, timeoutSecs=10)
if __name__ == '__main__':
h2o.unit_main()
| 28.709677 | 91 | 0.673034 | ["Apache-2.0"] | jhsrcmh/h2o | py/testdir_single_jvm/notest_parse3.py | 890 | Python |
import torch
import time
from math import pi
import numpy as np
from os.path import join
from ..utils.tensorboard import Tensorboard
from ..utils.output import progress
from .convergence import Convergence
from ..model.deepmod import DeepMoD
from typing import Optional
def train(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
sparsity_scheduler,
test = 'mse',
split: float = 0.8,
log_dir: Optional[str] = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
"""
start_time = time.time()
board = Tensorboard(log_dir) # initializing tb board
# Splitting data, assumes data is already randomized
n_train = int(split * data.shape[0])
n_test = data.shape[0] - n_train
data_train, data_test = torch.split(data, [n_train, n_test], dim=0)
target_train, target_test = torch.split(target, [n_train, n_test], dim=0)
# Training
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, max_iterations + 1):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data_train)
MSE = torch.mean((prediction - target_train)**2, dim=0) # loss per output
Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum(MSE + Reg)
# Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if iteration % write_iterations == 0:
# ================== Validation costs ================
prediction_test, coordinates = model.func_approx(data_test)
time_derivs_test, thetas_test = model.library((prediction_test, coordinates))
with torch.no_grad():
MSE_test = torch.mean((prediction_test - target_test)**2, dim=0) # loss per output
Reg_test = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = torch.sum(MSE_test + Reg_test)
# ====================== Logging =======================
_ = model.sparse_estimator(thetas, time_derivs) # calculating l1 adjusted coeffs but not setting mask
estimator_coeff_vectors = model.estimator_coeffs()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
progress(iteration, start_time, max_iterations, loss.item(),
torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test)
# ================== Sparsity update =============
# Updating sparsity and or convergence
#sparsity_scheduler(iteration, l1_norm)
if iteration % write_iterations == 0:
if test == 'mse':
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
else:
sparsity_scheduler(iteration, loss_test, model, optimizer)
if sparsity_scheduler.apply_sparsity is True:
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
# ================= Checking convergence
convergence(iteration, torch.sum(l1_norm))
if convergence.converged is True:
print('Algorithm converged. Stopping training.')
break
board.close()
if log_dir is None:
path = 'model.pt'
else:
path = join(log_dir, 'model.pt')
torch.save(model.state_dict(), path)
def train_multitask(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
sparsity_scheduler,
test = 'mse',
split: float = 0.8,
log_dir: Optional[str] = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
"""
start_time = time.time()
board = Tensorboard(log_dir) # initializing tb board
# Splitting data, assumes data is already randomized
n_train = int(split * data.shape[0])
n_test = data.shape[0] - n_train
data_train, data_test = torch.split(data, [n_train, n_test], dim=0)
target_train, target_test = torch.split(target, [n_train, n_test], dim=0)
# Training
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, max_iterations + 1):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data_train)
MSE = torch.mean((prediction - target_train)**2, dim=0) # loss per output
Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum(torch.exp(-model.s[:, 0]) * MSE + torch.exp(-model.s[:, 1]) * Reg + torch.sum(model.s))
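# Hedged reading of the loss above: model.s holds learnable per-task log-variances, so exp(-s)
# acts as a precision weight on each term and the trailing sum(s) penalizes inflating the
# variances; this mirrors the homoscedastic-uncertainty style of multi-task weighting.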
# Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if iteration % write_iterations == 0:
# ================== Validation costs ================
prediction_test, coordinates = model.func_approx(data_test)
time_derivs_test, thetas_test = model.library((prediction_test, coordinates))
with torch.no_grad():
MSE_test = torch.mean((prediction_test - target_test)**2, dim=0) # loss per output
Reg_test = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = torch.sum(torch.exp(-model.s[:, 0]) * MSE_test + torch.exp(-model.s[:, 1]) * Reg_test + torch.sum(model.s))
# ====================== Logging =======================
_ = model.sparse_estimator(thetas, time_derivs) # calculating l1 adjusted coeffs but not setting mask
estimator_coeff_vectors = model.estimator_coeffs()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
progress(iteration, start_time, max_iterations, loss.item(),
torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test, s=model.s)
# ================== Sparsity update =============
# Updating sparsity and or convergence
#sparsity_scheduler(iteration, l1_norm)
if iteration % write_iterations == 0:
if test == 'mse':
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
else:
sparsity_scheduler(iteration, loss_test, model, optimizer)
if sparsity_scheduler.apply_sparsity is True:
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
# ================= Checking convergence
convergence(iteration, torch.sum(l1_norm))
if convergence.converged is True:
print('Algorithm converged. Stopping training.')
break
board.close()
if log_dir is None:
path = 'model.pt'
else:
path = join(log_dir, 'model.pt')
torch.save(model.state_dict(), path)
def train_multitask_capped(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
sparsity_scheduler,
test = 'mse',
split: float = 0.8,
log_dir: Optional[str] = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
"""
start_time = time.time()
board = Tensorboard(log_dir) # initializing tb board
# Splitting data, assumes data is already randomized
n_train = int(split * data.shape[0])
n_test = data.shape[0] - n_train
data_train, data_test = torch.split(data, [n_train, n_test], dim=0)
target_train, target_test = torch.split(target, [n_train, n_test], dim=0)
#cutoff = torch.full((model.func_approx.architecture[-1], 1), 1e-5).to(target.device)
cutoff = torch.tensor(15.).to(target.device)
# Training
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, max_iterations + 1):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data_train)
MSE = torch.mean((prediction - target_train)**2, dim=0) # loss per output
Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2, dim=0)
for dt, theta, coeff_vector in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
s_capped = torch.min(torch.max(model.s, -cutoff), cutoff)
loss = torch.sum(torch.exp(-s_capped[:, 0]) * MSE + torch.exp(-s_capped[:, 1]) * Reg + torch.sum(s_capped))
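# Hedged reading: same weighting as train_multitask, except the learned log-weights are clamped
# to [-cutoff, cutoff] (cutoff = 15 above) so that exp(-s) stays numerically bounded.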
# Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if iteration % write_iterations == 0:
# ================== Validation costs ================
prediction_test, coordinates = model.func_approx(data_test)
time_derivs_test, thetas_test = model.library((prediction_test, coordinates))
with torch.no_grad():
MSE_test = torch.mean((prediction_test - target_test)**2, dim=0) # loss per output
Reg_test = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = torch.sum(torch.exp(-s_capped[:, 0]) * MSE_test + torch.exp(-s_capped[:, 1]) * Reg_test + torch.sum(s_capped))
# ====================== Logging =======================
_ = model.sparse_estimator(thetas, time_derivs) # calculating l1 adjusted coeffs but not setting mask
estimator_coeff_vectors = model.estimator_coeffs()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
progress(iteration, start_time, max_iterations, loss.item(),
torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test, s=model.s)
# ================== Sparsity update =============
# Updating sparsity and or convergence
#sparsity_scheduler(iteration, l1_norm)
if iteration % write_iterations == 0:
if test == 'mse':
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
else:
sparsity_scheduler(iteration, loss_test, model, optimizer)
if sparsity_scheduler.apply_sparsity is True:
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
# ================= Checking convergence
convergence(iteration, torch.sum(l1_norm))
if convergence.converged is True:
print('Algorithm converged. Stopping training.')
break
board.close()
if log_dir is None:
path = 'model.pt'
else:
path = join(log_dir, 'model.pt')
torch.save(model.state_dict(), path)
def train_gradnorm(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
sparsity_scheduler,
alpha,
test = 'mse',
split: float = 0.8,
log_dir: Optional[str] = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""[summary]
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
"""
start_time = time.time()
board = Tensorboard(log_dir) # initializing tb board
# Splitting data, assumes data is already randomized
n_train = int(split * data.shape[0])
n_test = data.shape[0] - n_train
data_train, data_test = torch.split(data, [n_train, n_test], dim=0)
target_train, target_test = torch.split(target, [n_train, n_test], dim=0)
# Training
convergence = Convergence(**convergence_kwargs)
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, max_iterations + 1):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data_train)
MSE = torch.mean((prediction - target_train)**2, dim=0) # loss per output
Reg = torch.cat([torch.mean((dt - theta @ coeff_vector)**2, dim=0)
for dt, theta, coeff_vector in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
task_loss = (torch.exp(model.weights) * torch.stack((MSE, Reg), axis=1)).flatten() # weighted losses
loss = torch.sum(task_loss)
if iteration == 0: # Getting initial loss
ini_loss = task_loss.data
if torch.any(task_loss.data > ini_loss):
ini_loss[task_loss.data > ini_loss] = task_loss.data[task_loss.data > ini_loss]
# Getting original grads
optimizer.zero_grad()
loss.backward(retain_graph=True)
model.weights.grad.data = model.weights.grad.data * 0.0 # setting weight grads to zero
# Getting Grads to normalize
G = torch.tensor([torch.norm(torch.autograd.grad(loss_i, list(model.parameters())[-2], retain_graph=True, create_graph=True)[0], 2) for loss_i in task_loss]).to(data.device)
G_mean = torch.mean(G)
# Calculating relative losses
rel_loss = task_loss / ini_loss
inv_train_rate = rel_loss / torch.mean(rel_loss)
# Calculating grad norm loss
grad_norm_loss = torch.sum(torch.abs(G - G_mean * inv_train_rate ** alpha))
# Setting grads
model.weights.grad = torch.autograd.grad(grad_norm_loss, model.weights)[0]
# do a step with the optimizer
optimizer.step()
# renormalize
normalize_coeff = task_loss.shape[0] / torch.sum(model.weights)
model.weights.data = torch.log(torch.exp(model.weights.data) * normalize_coeff)
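# Hedged reading of the block above, in the spirit of GradNorm: per-task gradient norms G are
# taken w.r.t. a late shared layer, tasks that train relatively slowly get a larger target norm
# G_mean * inv_train_rate**alpha, the weights are updated to chase those targets, and they are
# rescaled after each step so their overall magnitude stays fixed.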
if iteration % write_iterations == 0:
# ================== Validation costs ================
prediction_test, coordinates = model.func_approx(data_test)
time_derivs_test, thetas_test = model.library((prediction_test, coordinates))
with torch.no_grad():
MSE_test = torch.mean((prediction_test - target_test)**2, dim=0) # loss per output
Reg_test = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = model.weights @ torch.stack((MSE, Reg), axis=0)
# ====================== Logging =======================
_ = model.sparse_estimator(thetas, time_derivs) # calculating l1 adjusted coeffs but not setting mask
estimator_coeff_vectors = model.estimator_coeffs()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
progress(iteration, start_time, max_iterations, loss.item(),
torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test, w=model.weights)
# ================== Sparsity update =============
# Updating sparsity and or convergence
#sparsity_scheduler(iteration, l1_norm)
if iteration % write_iterations == 0:
if test == 'mse':
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
else:
sparsity_scheduler(iteration, loss_test, model, optimizer)
if sparsity_scheduler.apply_sparsity is True:
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
sparsity_scheduler.reset()
# ================= Checking convergence
convergence(iteration, torch.sum(l1_norm))
if convergence.converged is True:
print('Algorithm converged. Stopping training.')
break
board.close()
if log_dir is None:
path = 'model.pt'
else:
path = join(log_dir, 'model.pt')
torch.save(model.state_dict(), path)
def train_SBL(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
extra_params,
sparsity_scheduler,
split = 0.8,
exp_ID: str = None,
log_dir: str = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""Trains the DeepMoD model. This function automatically splits the data set in a train and test set.
Args:
model (DeepMoD): A DeepMoD object.
data (torch.Tensor): Tensor of shape (n_samples x (n_spatial + 1)) containing the coordinates, first column should be the time coordinate.
target (torch.Tensor): Tensor of shape (n_samples x n_features) containing the target data.
optimizer ([type]): Pytorch optimizer.
sparsity_scheduler ([type]): Decides when to update the sparsity mask.
split (float, optional): Fraction of the train set, by default 0.8.
exp_ID (str, optional): Unique ID to identify tensorboard file. Not used if log_dir is given, see pytorch documentation.
log_dir (str, optional): Directory where tensorboard file is written, by default None.
max_iterations (int, optional): [description]. Max number of epochs , by default 10000.
write_iterations (int, optional): [description]. Sets how often data is written to tensorboard and checks train loss , by default 25.
"""
logger = Logger(exp_ID, log_dir)
sparsity_scheduler.path = logger.log_dir # write checkpoint to same folder as tb output.
t, a, l = extra_params
# Splitting data, assumes data is already randomized
n_train = int(split * data.shape[0])
n_test = data.shape[0] - n_train
data_train, data_test = torch.split(data, [n_train, n_test], dim=0)
target_train, target_test = torch.split(target, [n_train, n_test], dim=0)
M = 12
N = data_train.shape[0]
threshold = 1e4
# Training
convergence = Convergence(**convergence_kwargs)
for iteration in torch.arange(0, max_iterations):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data_train)
tau_ = torch.exp(t)
alpha_ = torch.min(torch.exp(a), torch.tensor(1e8, dtype=torch.float32))
lambda_ = torch.min(torch.exp(l), torch.tensor(2e4, dtype=torch.float32))
y = time_derivs[0]
X = thetas[0] / torch.norm(thetas[0], dim=0, keepdim=True)
p_MSE = N / 2 * (tau_ * torch.mean((prediction - target_train)**2, dim=0) - t + np.log(2*np.pi))
A = torch.diag(lambda_) + alpha_ * X.T @ X
mn = (lambda_ < threshold)[:, None] * (alpha_ * torch.inverse(A) @ X.T @ y)
E = alpha_ * torch.sum((y - X @ mn)**2) + mn.T @ torch.diag(lambda_) @ mn
p_reg = 1/2 * (E + torch.sum(torch.log(torch.diag(A)[lambda_ < threshold])) - (torch.sum(l[lambda_ < threshold]) + N * a) - N * np.log(2*np.pi))
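# Hedged reading: tau_, alpha_ and lambda_ are exponentiated (and capped) learnable precisions;
# p_MSE is the Gaussian negative log-likelihood of the network fit, while p_reg collects
# sparse-Bayesian-learning style evidence terms for the library regression, with coefficients
# whose prior precision lambda_ exceeds `threshold` effectively pruned from mn.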
MSE = torch.mean((prediction - target_train)**2, dim=0) # loss per output
Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum(p_MSE + p_reg)
# Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if iteration % write_iterations == 0:
# ================== Validation costs ================
with torch.no_grad():
prediction_test = model.func_approx(data_test)[0]
MSE_test = torch.mean((prediction_test - target_test)**2, dim=0) # loss per output
# ====================== Logging =======================
_ = model.sparse_estimator(thetas, time_derivs) # calculating estimator coeffs but not setting mask
logger(iteration,
loss, MSE, Reg,
model.constraint_coeffs(sparse=True, scaled=True),
model.constraint_coeffs(sparse=True, scaled=False),
model.estimator_coeffs(),
MSE_test=MSE_test,
p_MSE = p_MSE,
p_reg = p_reg,
tau = tau_,
alpha=alpha_,
lambda_=lambda_,
mn=mn)
# ================== Sparsity update =============
# Updating sparsity
update_sparsity = sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
if update_sparsity:
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
# ================= Checking convergence
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)))
converged = convergence(iteration, l1_norm)
if converged:
break
logger.close(model)
| 48.967433 | 263 | 0.58734 | ["MIT"] | GJBoth/MultiTaskPINN | src/multitaskpinn/training/training.py | 25,561 | Python |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.messages import CTransaction, sha256
from test_framework.test_framework import ErexCoinTestFramework
from test_framework.util import assert_equal, bytes_to_hex_str, hex_str_to_bytes
from io import BytesIO
class DecodeScriptTest(ErexCoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
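# Note: the leading hex bytes are pushdata lengths, i.e. 0x48 pushes the 72-byte signature and
# 0x21 pushes the 33-byte compressed public key.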
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '5dd1d3a048119c27b28293056724d9522f26d945'
push_public_key_hash = '14' + public_key_hash
uncompressed_public_key = '04b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb25e01fc8fde47c96c98a4f3a8123e33a38a50cf9025cc8c4494a518f991792bb7'
push_uncompressed_public_key = '41' + uncompressed_public_key
p2wsh_p2pk_script_hash = 'd8590cf8ea0674cf3d49fd7ca249b85ef7485dea62c138468bddeb20cd6519f7'
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# P2PK is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# P2PKH is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
multisig_script = '52' + push_public_key + push_public_key + push_public_key + '53ae'
rpc_result = self.nodes[0].decodescript(multisig_script)
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# multisig in P2WSH
multisig_script_hash = bytes_to_hex_str(sha256(hex_str_to_bytes(multisig_script)))
assert_equal('0 ' + multisig_script_hash, rpc_result['segwit']['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
# P2SH does not work in segwit scripts. decodescript should not return a result for it.
assert 'segwit' not in rpc_result
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
cltv_script = '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac'
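# Byte-level reading of cltv_script (for orientation): 63 OP_IF, <33-byte pubkey>, ad OP_CHECKSIGVERIFY,
# 67 OP_ELSE, 03 20a107 (500000 pushed little-endian), b1 OP_CHECKLOCKTIMEVERIFY, 75 OP_DROP,
# 68 OP_ENDIF, <33-byte pubkey>, ac OP_CHECKSIG.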
rpc_result = self.nodes[0].decodescript(cltv_script)
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
# CLTV script in P2WSH
cltv_script_hash = bytes_to_hex_str(sha256(hex_str_to_bytes(cltv_script)))
assert_equal('0 ' + cltv_script_hash, rpc_result['segwit']['asm'])
# 7) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_uncompressed_public_key + 'ac')
assert_equal(uncompressed_public_key + ' OP_CHECKSIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 8) multisig scriptPubKey with an uncompressed pubkey
# <m> <A pubkey> <B pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# the purpose of this test is to check that a segwit script is not returned for bare multisig scripts
# with an uncompressed pubkey in them.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_uncompressed_public_key +'52ae')
assert_equal('2 ' + public_key + ' ' + uncompressed_public_key + ' 2 OP_CHECKMULTISIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 9) P2WPKH scriptpubkey
# 0 <PubKeyHash>
rpc_result = self.nodes[0].decodescript('00' + push_public_key_hash)
assert_equal('0 ' + public_key_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
# 10) P2WSH scriptpubkey
# 0 <ScriptHash>
# even though this hash is of a P2PK script which is better used as bare P2WPKH, it should not matter
# for the purpose of this test.
rpc_result = self.nodes[0].decodescript('0020' + p2wsh_p2pk_script_hash)
assert_equal('0 ' + p2wsh_p2pk_script_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
def decoderawtransaction_asm_sighashtype(self):
"""Test decoding scripts via RPC command "decoderawtransaction".
This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
| 71.302128 | 761 | 0.760444 | ["MIT"] | Black-NET/erexcoin-source | test/functional/rpc_decodescript.py | 16,756 | Python |
# Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
import unittest
from opencensus.common import utils
from opencensus.trace.link import Link
from opencensus.trace.span import format_span_json
from opencensus.trace.time_event import TimeEvent
class TestBlankSpan(unittest.TestCase):
@staticmethod
def _get_target_class():
from opencensus.trace.blank_span import BlankSpan
return BlankSpan
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_do_not_crash(self):
span_id = 'test_span_id'
span_name = 'test_span_name'
patch = mock.patch(
'opencensus.trace.blank_span.generate_span_id',
return_value=span_id)
with patch:
span = self._make_one(span_name)
self.assertEqual(span.name, span_name)
self.assertEqual(span.span_id, span_id)
self.assertIsNotNone(span.parent_span)
self.assertIsNotNone(span.parent_span.span())
self.assertEqual(span.attributes, {})
self.assertIsNone(span.start_time)
self.assertIsNone(span.end_time)
self.assertEqual(span.children, [])
self.assertIsNone(span.context_tracer)
span.add_attribute('attribute_key', 'attribute_value')
span.add_annotation('This is a test', name='blank-span')
link = Link(span_id='1234', trace_id='4567')
span.add_link(link)
time_event = mock.Mock()
with self.assertRaises(TypeError):
span.add_time_event(time_event)
time_event = TimeEvent(datetime.datetime.utcnow())
span.add_time_event(time_event)
span_iter_list = list(iter(span))
self.assertEqual(span_iter_list, [span])
expected_span_json = {
'spanId': 'test_span_id',
'startTime': None,
'endTime': None,
'displayName': {
'truncated_byte_count': 0,
'value': 'test_span_name'
},
'childSpanCount': 0,
}
span_json = format_span_json(span)
self.assertEqual(span_json, expected_span_json)
span.start()
span.finish()
def test_constructor_explicit(self):
span_id = 'test_span_id'
span_name = 'test_span_name'
parent_span = mock.Mock()
start_time = utils.to_iso_str()
end_time = utils.to_iso_str()
attributes = {
'http.status_code': '200',
'component': 'HTTP load balancer',
}
time_events = mock.Mock()
links = mock.Mock()
stack_trace = mock.Mock()
status = mock.Mock()
context_tracer = mock.Mock()
span = self._make_one(
name=span_name,
parent_span=parent_span,
attributes=attributes,
start_time=start_time,
end_time=end_time,
span_id=span_id,
stack_trace=stack_trace,
time_events=time_events,
links=links,
status=status,
context_tracer=context_tracer)
self.assertEqual(span.name, span_name)
self.assertIsNotNone(span.span_id)
self.assertEqual(span.attributes, {})
self.assertEqual(span.start_time, start_time)
self.assertEqual(span.end_time, end_time)
self.assertEqual(span.time_events, time_events)
self.assertEqual(span.stack_trace, stack_trace)
self.assertEqual(span.links, [])
self.assertEqual(span.status, status)
self.assertEqual(span.children, [])
self.assertEqual(span.context_tracer, context_tracer)
def test_start(self):
span_name = 'root_span'
span = self._make_one(span_name)
self.assertIsNone(span.start_time)
span.start()
self.assertIsNone(span.start_time)
def test_finish_without_context_tracer(self):
span_name = 'root_span'
span = self._make_one(span_name)
self.assertIsNone(span.end_time)
span.finish()
self.assertIsNone(span.end_time)
def test_finish(self):
span_name = 'root_span'
span = self._make_one(span_name)
self.assertIsNone(span.end_time)
span.finish()
self.assertIsNone(span.end_time)
def test_on_create(self):
from opencensus.trace.blank_span import BlankSpan
self.on_create_called = False
self._make_one('span1')
self.assertFalse(self.on_create_called)
try:
@BlankSpan.on_create
def callback(span):
self.on_create_called = True
self._make_one('span2')
finally:
BlankSpan._on_create_callbacks = []
self.assertFalse(self.on_create_called)
def test_context_manager(self):
span_name = 'root_span'
with self._make_one(span_name) as s:
self.assertIsNotNone(s)
self.assertEqual(s.name, span_name)
| 31.369318 | 74 | 0.638834 | ["Apache-2.0"] | kshithijiyer/opencensus-python | tests/unit/trace/test_blank_span.py | 5,521 | Python |
import os
import shutil
cur_dir= os.getcwd()
def make_folder(name): #Checks to see if a folder exists, makes it if it doesn't.
if (os.path.exists('.\\'+name)):
return
else:
os.makedirs('.\\'+name)
def notepad(name):
if os.path.exists(os.path.join(cur_dir,name)) == False:
var_create= open(name,'w')
var= open(name,'r+')
var_data=var.read()
var.seek(0)
return var_data,var
txt_data,txt_file= notepad('clean_folder.txt')
skip = notepad('skip.txt')[0]
'''
os.path.exists(os.path.join(cur_dir,'clean_folder.txt'))==False: #creates clean_folder.txt if it doesn't exist
txt_file_create=open('clean_folder.txt','w')
txt_file= open('clean_folder.txt','r+')
txt_data= txt_file.read()
txt_file.seek(0)
'''
make_folder('error_files')
if os.path.exists("others"): #takes all files in 'others' folders and copies them to current directory
files_others= os.listdir('others')
while len(os.listdir('others')) != 0:
for file in files_others:
if os.path.isdir(file)==False:
try:
shutil.move(os.path.join(cur_dir,'others',file),os.path.join(cur_dir,file))
print ('File',file,' moved from others to ',cur_dir)
except OSError:
pass
shutil.move(os.path.join('others',file),'error_files')
else:
unpack(file)
if len(os.listdir('others'))==0:
os.rmdir('others')
print ("test dialogue")
files= [x for x in os.listdir() if os.path.isdir(x)== False and x != 'clean_folder_v4.py' and x!='clean_folder.txt' and x not in skip and x!= "skip.txt"] #list of files(excluding folders) in current directory, excluding this program and its text file
extensions =[os.path.splitext(x)[1] for x in files] #list of extensions of files
print (extensions)
folders_created=[] #list of folders that will be created
for file in files:
extension = os.path.splitext(file)[1] #takes extension of file using splitext
print ('Extensions of file is : ',extension)
if os.path.exists(extension): #sees if that particular file already has a dedicated folder from a previous run of clean_folder
print ("File already has folder")
try:
shutil.move(os.path.join(cur_dir,file),extension) #if yes, it moves it to that folder
except OSError:
pass
shutil.move(os.path.join(cur_dir,file),'error_files')
files.remove(file)
extensions.remove(extension)
else:
print ("Folder will have to be created")
pass
for file in files:
extension = os.path.splitext(file)[1]
if extensions.count(str(extension))>1: #checks to see how many times files of that extension occurs in list 'extensions'
#if more than 1, creates a folder for that extension
print ("File type exists more than once. Creating new folder..")
make_folder(extension)
folders_created.append(extension) #appends to list 'folders_created'
try:
shutil.move(os.path.join(cur_dir,file),extension) #moves file to folder of that extension
print ('File',file,'moved to folder' ,extension)
except OSError:
pass
shutil.move(os.path.join(cur_dir,file),'error_files')
else:
make_folder("others") # if only 1 file of an extension exists, it is put in others folder
folders_created.append("others")
try:
print ('File',file,'moved to others')
shutil.move(os.path.join(cur_dir,file),"others")
except OSError:
pass
shutil.move(os.path.join(cur_dir,file),'error_files')
for folder in folders_created:
txt_file.write(folder+"\n")
txt_file.close()
if len(os.listdir('error_files'))== 0 :
os.rmdir('error_files')
else:
print ("The following files could not be moved due to an unknown error. Please move them manually from folder \'error_files\'")
print (os.listdir('error_files'))
print (files)
os.system('pause')
''' Working:
When the program is run for the first time, it doesn't harm existing folders. Takes stray files and sorts them extension wise
Creates folder for each file type that exists more than once. All the rest are put in 'others' folder
When run the next time, it unpacks 'others' folders, in case a file that has been put there now has another file of same extension.
Same thing is done again. If a file now placed has a folder already dedicated to its extension, it is put there
All the while , it writes the name of the folders created by it in clean_folder.txt . This is so that they can be unpacked
using unpack.py, without harming any other folders.
Any files in skip.txt will not be moved.
'''
| 34.09589 | 251 | 0.634592 | ["MIT"] | Satvik2101/Clean-Folder | clean_folder_v4.py | 4,978 | Python |
from datetime import datetime, timedelta
from typing import Callable
def create_batch_list(n_batches: int) -> list:
return list(map(lambda x: x, range(1, n_batches + 1)))
def create_batch_dictionary(batch_lst: list, duration_lst: list, expected_finish_lst: list) -> dict:
batch_dict = {batch_lst[i]: (duration_lst[i], expected_finish_lst[i]) for i in range(0, len(batch_lst))}
return batch_dict
def create_result_batch_dictionary(batch_lst: list, start_datetime_lst: list, delta_time_lst: list) -> dict:
batch_dict = {batch_lst[i]: (start_datetime_lst[i], delta_time_lst[i]) for i in range(0, len(batch_lst))}
return batch_dict
def create_dynamic_ordering_constraint(index: int) -> str:
"""
Creates a valid AMPL constraint of the form:
[LaTeX]: $start\_time_{j+1} \geq start\_time_j + duration_j$, $\forall j \in BATCH$
:param index: j index where the current constraint should start
:return: single AMPL JIT constraint as a string
"""
i = str(index)
i_next = str(index + 1)
constraint_name = f'ordering_{i_next}_{i}'
return f's.t. {constraint_name}: start_time[{i_next}] >= start_time[{i}] + duration[{i}];'
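# Illustrative example (not part of the original module):
# create_dynamic_ordering_constraint(3)
# -> 's.t. ordering_4_3: start_time[4] >= start_time[3] + duration[3];'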
def create_multiple_ordering_constraints(start_index: int, last_index: int) -> str:
constraints = ''
for i in range(start_index, last_index):
constraints += f'{create_dynamic_ordering_constraint(i)}\n'
return constraints
def create_multiple_constraints(start_index: int, last_index: int, create_constraints: Callable[[int, int], str]):
return create_constraints(start_index, last_index)
def dict_to_list(obj: dict) -> list:
"""
Converts a dictionary to a list, extracting the values of the dictionary.
The list is sorted according to the dict's keys ascendant order.
The given dictionary should always have the same numeric keys as the result of create_batch_dictionary().
:param obj: the dictionary to convert which should have numeric keys
:return: the list of values in the dictionary
"""
return list(obj.values())
def strings_to_datetimes(str_date_lst: list, datetime_format: str) -> list:
"""
Converts a list of strings into a list of datetime objects
:param str_date_lst: list of string objects compatible with the ISO8601 format
:param datetime_format: format of the datetime
:return: list of datetime objects equivalent to the given str_date_lst
"""
return [datetime.strptime(d, datetime_format) for d in str_date_lst]
def minute_timedelta(first: datetime, second: datetime) -> int:
"""
Returns the difference expressed in minutes between 2 datetime objects
:param first: datetime object that comes before second
:param second: datetime object that comes after first
:return: difference in minutes between second and first
"""
delta: timedelta = second - first
return divmod(delta.total_seconds(), 60)[0]
def minute_timedeltas_wrt_first(datetime_lst: list) -> list:
"""
Converts a list of datetime objects into a list of minute time deltas with respect to the first item.
For example, given the input datetime_lst:
[
'2019-08-22 14:32',
'2019-08-22 14:38',
'2019-08-22 14:42',
'2019-08-22 14:52',
'2019-08-22 14:57'
],
the result would be:
[32, 38, 42, 52, 57]
:param datetime_lst: list of datetime objects
:return: minute time deltas with respect to the first item of datetime_lst
"""
first_datetime: datetime = datetime_lst[0]
partial_deltas = [minute_timedelta(first=first_datetime, second=v) for v in datetime_lst[1:]]
first_minutes = first_datetime.minute
return [first_minutes] + list(map(lambda x: x + first_minutes, partial_deltas))
def set_minutes_to_datetimes(datetime_lst: list, minutes_lst: list) -> list:
"""
Given a list of minutes and datetime objects, sets each amount of minutes to each datetime object with respect
to the list index. The two lists must have the same size.
:param datetime_lst: list of datetime objects
:param minutes_lst: list of minutes to set to a list of datetime objects
:return: list of datetime objects similar to datetime_lst but shifted according to minutes_lst
"""
return [d.replace(minute=0) + timedelta(minutes=m) for d, m in zip(datetime_lst, minutes_lst)]
def datetimes_to_strings(datetime_lst: list, datetime_format: str) -> list:
"""
Converts a list of datetime objects to strings, according to a certain datetime format.
:param datetime_lst: list of datetime objects to convert to string
:param datetime_format: format of the datetime
:return: the list of datetime objects converted to strings in the given datetime format
"""
return [d.strftime(datetime_format) for d in datetime_lst]
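# Illustrative round-trip (not part of the original module), reusing the sample data from the
# minute_timedeltas_wrt_first docstring:
# fmt = '%Y-%m-%d %H:%M'
# dts = strings_to_datetimes(['2019-08-22 14:32', '2019-08-22 14:38'], fmt)
# minute_timedeltas_wrt_first(dts) # -> [32, 38]
# shifted = set_minutes_to_datetimes(dts, [40, 75]) # minutes past each hour; 75 rolls into the next hour
# datetimes_to_strings(shifted, fmt) # -> ['2019-08-22 14:40', '2019-08-22 15:15']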
| 40.745763 | 114 | 0.720882 | ["MIT"] | jkomyno/amplrestapi | ampljit/utils.py | 4,808 | Python |
from tkinter import messagebox, Tk, Menu, ttk
news = ['Mid Day News', 'Evening News']
features = ['Calling Farmers', 'Round About Ja', 'You and the Law', 'Get the Facts',
'Career Talk', 'Economy and you', 'Arts Page', 'Tourism Roundup',
'Jeep','Jamaica Promise', 'House Matters', 'Jamaica House Weekly']
features.sort()
class CustomMenu(object):
def __init__(self, root, values=[], combo_placement=(0, 0), button_placement=(0, 0), label_placement=(0, 0)):
self.frame = root
self.combobox = ttk.Combobox(self.frame, values=values)
self.combobox.bind("<<ComboboxSelected>>")
self.combobox.grid(row=combo_placement[0], column=combo_placement[1])
self.label = ttk.Label(self.frame, textvariable=self.combobox.get())
self.label.grid(row=label_placement[0], column=label_placement[1])
self.button = ttk.Button(self.frame, text="Add", command=self.update_popup)
self.button.grid(row=button_placement[0], column=button_placement[1])
def update_popup(self):
messagebox.showinfo(
title="File update",
message="{} has been added".format(self.combobox.get())
)
root = Tk()
root.title('Feature Tracking')
root.geometry('255x425')
update_frame = ttk.Frame(root, padding=(5,10))
def show_update_frame():
update_frame.grid(row=0, column=0)
#Update Menu Frame
features_frame = CustomMenu(update_frame, features, (1, 0), (3, 0), (0, 0))
news_frame = CustomMenu(update_frame, news, (4, 0), (5, 0), (6, 0))
#Menu bar with menu options
menubar = Menu(root)
#Update Menu
filemenu = Menu(menubar, tearoff = 0)
filemenu.add_command(label='New', command=show_update_frame)
menubar.add_cascade(label='Update', menu=filemenu)
root.config(menu = menubar)
root.mainloop()
| 37.571429 | 114 | 0.661054 | ["MIT"] | UncleEngineer/TkinterTrick | 000-combobox1.py | 1,841 | Python |
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def home():
return render_template("index.html")
if __name__ == "__main__":
app.run(debug=True)
| 14.461538 | 40 | 0.68617 | ["Apache-2.0"] | nurmatthias/100DaysOfCode | day48/server.py | 188 | Python |
#
# Copyright 2018 Expedia Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from functions import bootstrap
test_cases = [
{
'sample': [3, 3, 3],
'predstot': 1,
'predsdim': 1,
'len': 1,
'mean': np.float64(3),
'meanInt64': np.int64(3)
},
{
'sample': [2, 2, 2],
'predstot': 1000,
'predsdim': 2,
'len': 1000,
'mean': np.float64(2),
'meanInt64': np.int64(2)
},
{
'sample': [1, 1, 1],
'predstot': 9999,
'predsdim': 3,
'len': 9999,
'mean': np.float64(1),
'meanInt64': np.int64(1)
},
]
results = []
resultsInt64 = []
class BootstrapTestCase(unittest.TestCase):
    """ Unit tests for the bootstrap helper. """
def setUp(self):
for i in range(len(test_cases)):
results.append(
bootstrap(
test_cases[i]['sample'],
test_cases[i]['predstot'],
test_cases[i]['predsdim']
)
)
resultsInt64.append(
bootstrap(
test_cases[i]['sample'],
test_cases[i]['predstot'],
test_cases[i]['predsdim'],
True
)
)
    def test_len(self):
        """ bootstrap returns the requested number of predictions """
for i in range(len(test_cases)):
self.assertEqual(len(results[i]), test_cases[i]['len'])
self.assertEqual(len(resultsInt64[i]), test_cases[i]['len'])
    def test_value(self):
        """ every prediction equals the sample mean and has the expected dtype """
for i in range(len(test_cases)):
for j in range(len(results[i])):
self.assertEqual(results[i][j], test_cases[i]['mean'])
self.assertEqual(resultsInt64[i][j], test_cases[i]['meanInt64'])
self.assertIsInstance(results[i][j], np.float64)
self.assertIsInstance(resultsInt64[i][j], np.int64)
    def test_less(self):
        """ no prediction exceeds the sample maximum """
for i in range(len(test_cases)):
for j in range(len(results[i])):
self.assertLessEqual(results[i][j], max(test_cases[i]['sample']))
    def test_greater(self):
        """ no prediction falls below the sample minimum """
for i in range(len(test_cases)):
for j in range(len(results[i])):
self.assertGreaterEqual(results[i][j], min(test_cases[i]['sample']))
if __name__ == '__main__':
unittest.main() | 29.653465 | 84 | 0.543239 | [
"Apache-2.0"
] | ExpediaGroup/neaps | neaps-api/neaps_lib/bootstrap_test.py | 2,995 | Python |
# coding: utf-8
# Copyright 2020. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pprint
import re # noqa: F401
import six
class URL(object):
"""NOTE: This class is auto generated by the swagger code generator program.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'authority': 'str',
'content': 'object',
'default_port': 'int',
'file': 'str',
'host': 'str',
'path': 'str',
'port': 'int',
'protocol': 'str',
'query': 'str',
'ref': 'str',
'user_info': 'str'
}
attribute_map = {
'authority': 'authority',
'content': 'content',
'default_port': 'defaultPort',
'file': 'file',
'host': 'host',
'path': 'path',
'port': 'port',
'protocol': 'protocol',
'query': 'query',
'ref': 'ref',
'user_info': 'userInfo'
}
def __init__(self, authority=None, content=None, default_port=None, file=None, host=None, path=None, port=None, protocol=None, query=None, ref=None, user_info=None): # noqa: E501
"""URL - a model defined in Swagger""" # noqa: E501
self._authority = None
self._content = None
self._default_port = None
self._file = None
self._host = None
self._path = None
self._port = None
self._protocol = None
self._query = None
self._ref = None
self._user_info = None
self.discriminator = None
if authority is not None:
self.authority = authority
if content is not None:
self.content = content
if default_port is not None:
self.default_port = default_port
if file is not None:
self.file = file
if host is not None:
self.host = host
if path is not None:
self.path = path
if port is not None:
self.port = port
if protocol is not None:
self.protocol = protocol
if query is not None:
self.query = query
if ref is not None:
self.ref = ref
if user_info is not None:
self.user_info = user_info
@property
def authority(self):
"""Gets the authority of this URL. # noqa: E501
:return: The authority of this URL. # noqa: E501
:rtype: str
"""
return self._authority
@authority.setter
def authority(self, authority):
"""Sets the authority of this URL.
:param authority: The authority of this URL. # noqa: E501
:type: str
"""
self._authority = authority
@property
def content(self):
"""Gets the content of this URL. # noqa: E501
:return: The content of this URL. # noqa: E501
:rtype: object
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this URL.
:param content: The content of this URL. # noqa: E501
:type: object
"""
self._content = content
@property
def default_port(self):
"""Gets the default_port of this URL. # noqa: E501
:return: The default_port of this URL. # noqa: E501
:rtype: int
"""
return self._default_port
@default_port.setter
def default_port(self, default_port):
"""Sets the default_port of this URL.
:param default_port: The default_port of this URL. # noqa: E501
:type: int
"""
self._default_port = default_port
@property
def file(self):
"""Gets the file of this URL. # noqa: E501
:return: The file of this URL. # noqa: E501
:rtype: str
"""
return self._file
@file.setter
def file(self, file):
"""Sets the file of this URL.
:param file: The file of this URL. # noqa: E501
:type: str
"""
self._file = file
@property
def host(self):
"""Gets the host of this URL. # noqa: E501
:return: The host of this URL. # noqa: E501
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""Sets the host of this URL.
:param host: The host of this URL. # noqa: E501
:type: str
"""
self._host = host
@property
def path(self):
"""Gets the path of this URL. # noqa: E501
:return: The path of this URL. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this URL.
:param path: The path of this URL. # noqa: E501
:type: str
"""
self._path = path
@property
def port(self):
"""Gets the port of this URL. # noqa: E501
:return: The port of this URL. # noqa: E501
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""Sets the port of this URL.
:param port: The port of this URL. # noqa: E501
:type: int
"""
self._port = port
@property
def protocol(self):
"""Gets the protocol of this URL. # noqa: E501
:return: The protocol of this URL. # noqa: E501
:rtype: str
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""Sets the protocol of this URL.
:param protocol: The protocol of this URL. # noqa: E501
:type: str
"""
self._protocol = protocol
@property
def query(self):
"""Gets the query of this URL. # noqa: E501
:return: The query of this URL. # noqa: E501
:rtype: str
"""
return self._query
@query.setter
def query(self, query):
"""Sets the query of this URL.
:param query: The query of this URL. # noqa: E501
:type: str
"""
self._query = query
@property
def ref(self):
"""Gets the ref of this URL. # noqa: E501
:return: The ref of this URL. # noqa: E501
:rtype: str
"""
return self._ref
@ref.setter
def ref(self, ref):
"""Sets the ref of this URL.
:param ref: The ref of this URL. # noqa: E501
:type: str
"""
self._ref = ref
@property
def user_info(self):
"""Gets the user_info of this URL. # noqa: E501
:return: The user_info of this URL. # noqa: E501
:rtype: str
"""
return self._user_info
@user_info.setter
def user_info(self, user_info):
"""Sets the user_info of this URL.
:param user_info: The user_info of this URL. # noqa: E501
:type: str
"""
self._user_info = user_info
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(URL, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, URL):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 24.435013 | 183 | 0.53528 | [
"Apache-2.0"
] | CSTC-WTCB-BBRI/python_tb_rest_client | tb_rest_client/models/models_pe/url.py | 9,212 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import json
from abc import abstractmethod
from argparse import ArgumentParser, Namespace
from typing import Any, List, NamedTuple, Optional, Tuple
from idb.cli.commands.base import TargetCommand
from idb.client.client import IdbClient
class NoBundleIdentifierProvidedException(BaseException):
pass
class BundleWithPath(NamedTuple):
bundle_id: Optional[str]
path: str
@classmethod
def parse(cls, argument: str) -> "BundleWithPath":
split = argument.split(sep=":", maxsplit=1)
if len(split) == 1:
return BundleWithPath(bundle_id=None, path=split[0])
return BundleWithPath(bundle_id=split[0], path=split[1])
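# Illustrative examples (not part of the original source) of how BundleWithPath.parse splits
# its argument; the bundle id and paths below are invented purely for demonstration:
#   BundleWithPath.parse("com.example.app:Documents/data.json")
#       -> BundleWithPath(bundle_id="com.example.app", path="Documents/data.json")
#   BundleWithPath.parse("Documents/data.json")
#       -> BundleWithPath(bundle_id=None, path="Documents/data.json")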
def _extract_bundle_id(args: Namespace) -> str:
if args.bundle_id:
return args.bundle_id
values = []
for value in vars(args).values():
if isinstance(value, List):
values.extend(value)
else:
values.append(value)
for value in values:
if not isinstance(value, BundleWithPath):
continue
bundle_id = value.bundle_id
if bundle_id is None:
continue
args.bundle_id = bundle_id
return args.bundle_id
raise NoBundleIdentifierProvidedException(f"No Bundle ID Provided in args {args}")
def _convert_args(args: Namespace) -> Tuple[Namespace, str]:
def convert_value(value: Any) -> Any: # pyre-ignore
if isinstance(value, List):
return [convert_value(x) for x in value]
return value.path if isinstance(value, BundleWithPath) else value
bundle_id = _extract_bundle_id(args)
args = Namespace(
**{
key: convert_value(value)
for (key, value) in vars(args).items()
if key != "bundle_id"
}
)
return (args, bundle_id)
class FSCommand(TargetCommand):
def add_parser_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"--bundle-id",
            help="Bundle ID of the application. May be omitted when a path argument carries a BUNDLE_ID: prefix.",
type=str,
required=False,
default=None,
)
super().add_parser_arguments(parser)
@abstractmethod
async def run_with_bundle(
self, bundle_id: str, args: Namespace, client: IdbClient
) -> None:
pass
async def run_with_client(self, args: Namespace, client: IdbClient) -> None:
(args, bundle_id) = _convert_args(args)
return await self.run_with_bundle(bundle_id=bundle_id, args=args, client=client)
class FSListCommand(FSCommand):
@property
def description(self) -> str:
return "List a path inside an application's container"
@property
def name(self) -> str:
return "list"
@property
def aliases(self) -> List[str]:
return ["ls"]
def add_parser_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"path", help="Source path", default="./", type=BundleWithPath.parse
)
super().add_parser_arguments(parser)
async def run_with_bundle(
self, bundle_id: str, args: Namespace, client: IdbClient
) -> None:
paths = await client.ls(bundle_id=bundle_id, path=args.path)
if args.json:
print(json.dumps([{"path": item.path} for item in paths]))
else:
for item in paths:
print(item.path)
class FSMkdirCommand(FSCommand):
@property
def description(self) -> str:
return "Make a directory inside an application's container"
@property
def name(self) -> str:
return "mkdir"
def add_parser_arguments(self, parser: ArgumentParser) -> None:
super().add_parser_arguments(parser)
parser.add_argument(
"path", help="Path to directory to create", type=BundleWithPath.parse
)
async def run_with_bundle(
self, bundle_id: str, args: Namespace, client: IdbClient
) -> None:
await client.mkdir(bundle_id=bundle_id, path=args.path)
class FSMoveCommand(FSCommand):
@property
def description(self) -> str:
return "Move a path inside an application's container"
@property
def name(self) -> str:
return "move"
@property
def aliases(self) -> List[str]:
return ["mv"]
def add_parser_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"src",
help="Source paths relative to Container",
nargs="+",
type=BundleWithPath.parse,
)
parser.add_argument(
"dst",
help="Destination path relative to Container",
type=BundleWithPath.parse,
)
super().add_parser_arguments(parser)
async def run_with_bundle(
self, bundle_id: str, args: Namespace, client: IdbClient
) -> None:
await client.mv(bundle_id=bundle_id, src_paths=args.src, dest_path=args.dst)
class FSRemoveCommand(FSCommand):
@property
def description(self) -> str:
return "Remove an item inside a container"
@property
def name(self) -> str:
return "remove"
@property
def aliases(self) -> List[str]:
return ["rm"]
def add_parser_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"path",
help="Path of item to remove (A directory will be recursively deleted)",
nargs="+",
type=BundleWithPath.parse,
)
super().add_parser_arguments(parser)
async def run_with_bundle(
self, bundle_id: str, args: Namespace, client: IdbClient
) -> None:
await client.rm(bundle_id=bundle_id, paths=args.path)
class FSPushCommand(FSCommand):
@property
def description(self) -> str:
return "Copy file(s) from local machine to target"
@property
def name(self) -> str:
return "push"
def add_parser_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"src_paths", help="Path of file(s) to copy to the target", nargs="+"
)
parser.add_argument(
"dest_path",
help=(
"Directory relative to the data container of the application\n"
"to copy the files into. Will be created if non-existent"
),
type=BundleWithPath.parse,
)
super().add_parser_arguments(parser)
async def run_with_bundle(
self, bundle_id: str, args: Namespace, client: IdbClient
) -> None:
return await client.push(
bundle_id=bundle_id, src_paths=args.src_paths, dest_path=args.dest_path
)
class FSPullCommand(FSCommand):
@property
def description(self) -> str:
return "Copy a file inside an application's container"
@property
def name(self) -> str:
return "pull"
def add_parser_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"src", help="Relative Container source path", type=BundleWithPath.parse
)
parser.add_argument("dst", help="Local destination path", type=str)
super().add_parser_arguments(parser)
async def run_with_bundle(
self, bundle_id: str, args: Namespace, client: IdbClient
) -> None:
await client.pull(bundle_id=bundle_id, src_path=args.src, dest_path=args.dst)
class DeprecatedPushCommand(TargetCommand):
@property
def description(self) -> str:
return "Copy file(s) from local machine to target"
@property
def name(self) -> str:
return "push"
def add_parser_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"src_paths", help="Path of file(s) to copy to the target", nargs="+"
)
parser.add_argument(
"bundle_id", help="Bundle id of the app to contain these files", type=str
)
parser.add_argument(
"dest_path",
help=(
"Directory relative to the data container of the application\n"
"to copy the files into. Will be created if non-existent"
),
type=str,
)
super().add_parser_arguments(parser)
async def run_with_client(self, args: Namespace, client: IdbClient) -> None:
        self.logger.warning("'push' is deprecated, please use 'file push' instead")
return await FSPushCommand().run_with_bundle(
bundle_id=args.bundle_id, args=args, client=client
)
class DeprecatedPullCommand(TargetCommand):
@property
def description(self) -> str:
return "Copy a file inside an application's container"
@property
def name(self) -> str:
return "pull"
def add_parser_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"bundle_id", help="Bundle id of the app to contain these files", type=str
)
        parser.add_argument("src", help="Relative Container source path", type=str)
parser.add_argument("dst", help="Local destination path", type=str)
super().add_parser_arguments(parser)
async def run_with_client(self, args: Namespace, client: IdbClient) -> None:
        self.logger.warning("'pull' is deprecated, please use 'file pull' instead")
return await FSPullCommand().run_with_bundle(
bundle_id=args.bundle_id, args=args, client=client
)
| 30.996753 | 88 | 0.627527 | [
"MIT"
] | BalestraPatrick/idb | idb/cli/commands/file.py | 9,547 | Python |
from __future__ import print_function
import numpy as np
import scipy.linalg
import torch
import torch.nn as nn
import torch.nn.functional as F
from flow_modules.misc import cpd_sum, cpd_mean
def squeeze2d(input, factor=2):
#assert factor >= 1 and isinstance(factor, int)
if factor == 1:
return input
size = input.size()
B = size[0]
C = size[1]
H = size[2]
W = size[3]
assert H % factor == 0 and W % factor == 0, "{}".format((H, W))
x = input.view(B, C, H // factor, factor, W // factor, factor)
x = x.permute(0, 1, 3, 5, 2, 4).contiguous()
x = x.view(B, C * factor * factor, H // factor, W // factor)
return x
def unsqueeze2d(input, factor=2):
assert factor >= 1 and isinstance(factor, int)
factor2 = factor ** 2
if factor == 1:
return input
size = input.size()
B = size[0]
C = size[1]
H = size[2]
W = size[3]
assert C % (factor2) == 0, "{}".format(C)
x = input.view(B, C // factor2, factor, factor, H, W)
x = x.permute(0, 1, 4, 2, 5, 3).contiguous()
x = x.view(B, C // (factor2), H * factor, W * factor)
return x
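# Shape sketch (added for illustration, not in the original file): with factor=2,
# squeeze2d trades spatial resolution for channels and unsqueeze2d inverts it exactly:
#   x = torch.randn(8, 3, 32, 32)            # (B, C, H, W)
#   y = squeeze2d(x, 2)                       # -> (8, 12, 16, 16)
#   assert torch.equal(unsqueeze2d(y, 2), x)  # round-trips to the original tensor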
class SqueezeLayer(nn.Module):
def __init__(self, factor):
super(SqueezeLayer, self).__init__()
self.factor = factor
def forward(self, input, logdet=0., reverse=False):
if not reverse:
output = squeeze2d(input, self.factor)
return output, logdet
else:
output = unsqueeze2d(input, self.factor)
return output, logdet
class InvertibleConv1x1(nn.Module):
def __init__(self, num_channels, LU_decomposed=True):
super().__init__()
w_shape = [num_channels, num_channels]
w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(np.float32)
if not LU_decomposed:
# Sample a random orthogonal matrix:
self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init)))
else:
np_p, np_l, np_u = scipy.linalg.lu(w_init)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
l_mask = np.tril(np.ones(w_shape, dtype=np.float32), -1)
eye = np.eye(*w_shape, dtype=np.float32)
self.register_buffer('p', torch.Tensor(np_p.astype(np.float32)))
self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(np.float32)))
self.l = nn.Parameter(torch.Tensor(np_l.astype(np.float32)))
self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(np.float32)))
self.u = nn.Parameter(torch.Tensor(np_u.astype(np.float32)))
self.l_mask = torch.Tensor(l_mask)
self.eye = torch.Tensor(eye)
self.w_shape = w_shape
self.LU = LU_decomposed
def get_weight(self, input, reverse):
w_shape = self.w_shape
pixels = list(input.size())[-1]
if not self.LU:
#thops.pixels(input)
dlogdet = (torch.slogdet(self.weight)[1]) * pixels*pixels
if not reverse:
weight = self.weight.view(w_shape[0], w_shape[1], 1, 1)
else:
weight = torch.inverse(self.weight.double()).float()\
.view(w_shape[0], w_shape[1], 1, 1)
return weight, dlogdet
else:
self.p = self.p.to(input.device)
self.sign_s = self.sign_s.to(input.device)
self.l_mask = self.l_mask.to(input.device)
self.eye = self.eye.to(input.device)
l = self.l * self.l_mask + self.eye
u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s))
dlogdet = cpd_sum(self.log_s) * pixels*pixels
if not reverse:
w = torch.matmul(self.p, torch.matmul(l, u))
else:
l = torch.inverse(l.cpu().double()).float()
u = torch.inverse(u.cpu().double()).float()
w = torch.matmul(u, torch.matmul(l, self.p.cpu().inverse()))
if torch.cuda.is_available():
w = w.cuda()
return w.view(w_shape[0], w_shape[1], 1, 1), dlogdet
    def forward(self, input, logdet=None, reverse=False):
        """
        log-det = log|det(W)| * number of spatial positions
        (the code assumes square feature maps and multiplies by pixels * pixels).
        """
weight, dlogdet = self.get_weight(input, reverse)
if not reverse:
z = F.conv2d(input, weight)
if logdet is not None:
logdet = logdet + dlogdet
return z, logdet
else:
z = F.conv2d(input, weight)
if logdet is not None:
logdet = logdet - dlogdet
return z, logdet
class Actnormlayer(nn.Module):
def __init__(self, num_features, scale=1.):
super(Actnormlayer, self).__init__()
self.register_buffer('is_initialized', torch.zeros(1))
self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1))
self.logs = nn.Parameter(torch.zeros(1, num_features, 1, 1))
self.num_features = num_features
self.scale = float(scale)
self.eps = 1e-6
def initialize_parameters(self, x):
if not self.training:
return
with torch.no_grad():
bias = -cpd_mean(x.clone(), dim=[0, 2, 3], keepdims=True)
v = cpd_mean((x.clone() + bias) ** 2, dim=[0, 2, 3], keepdims=True)
logs = (self.scale / (v.sqrt() + self.eps)).log()
self.bias.data.copy_(bias.data)
self.logs.data.copy_(logs.data)
self.is_initialized += 1.
def _center(self, x, reverse=False):
if reverse:
return x - self.bias
else:
return x + self.bias
def _scale(self, x, sldj, reverse=False):
logs = self.logs
if reverse:
x = x * logs.mul(-1).exp()
else:
x = x * logs.exp()
if sldj is not None:
ldj = logs.sum() * x.size(2) * x.size(3)
if reverse:
sldj = sldj - ldj
else:
sldj = sldj + ldj
return x, sldj
def forward(self, x, ldj=None, reverse=False):
if not self.is_initialized:
self.initialize_parameters(x)
if reverse:
x, ldj = self._scale(x, ldj, reverse)
x = self._center(x, reverse)
else:
x = self._center(x, reverse)
x, ldj = self._scale(x, ldj, reverse)
return x, ldj
class Split2dMsC(nn.Module):
def __init__(self, num_channels, level=0):
super().__init__()
self.level = level
def split_feature(self, z):
return z[:,:z.size(1)//2,:,:], z[:,z.size(1)//2:,:,:]
def split2d_prior(self, z):
h = self.conv(z)
return h[:,0::2,:,:], h[:,1::2,:,:]
def forward(self, input, logdet=0., reverse=False, eps_std=None):
if not reverse:
z1, z2 = self.split_feature(input)
return ( z1, z2), logdet
else:
z1, z2 = input
z = torch.cat((z1, z2), dim=1)
return z, logdet
class TupleFlip(nn.Module):
def __init__(self, ):
super().__init__()
def forward(self, z, logdet=0., reverse=False):
if not reverse:
z1, z2 = z.chunk(2, dim=1)
return torch.cat([z2,z1], dim=1), logdet
else:
z2, z1 = z.chunk(2, dim=1)
return torch.cat([z1,z2], dim=1), logdet
class GaussianDiag:
Log2PI = float(np.log(2 * np.pi))
@staticmethod
def likelihood(mean, logs, x):
return -0.5 * (logs * 2. + ((x - mean) ** 2) / torch.exp(logs * 2.) + GaussianDiag.Log2PI)
@staticmethod
def logp(mean, logs, x):
likelihood = GaussianDiag.likelihood(mean, logs, x)
return cpd_sum(likelihood, dim=[1,2,3])
@staticmethod
def sample(mean, logs, eps_std=None):
eps_std = eps_std or 1
eps = torch.normal(mean=torch.zeros_like(mean),
std=torch.ones_like(logs) * eps_std)
return mean + torch.exp(logs) * eps
| 28.222222 | 106 | 0.653543 | [
"Apache-2.0"
] | Catherine0505/mar-scf-flow | flow_modules/common_modules.py | 6,858 | Python |
# -*- coding: utf-8 -*-
from __future__ import division
import os
import pytest
from ethereum import _solidity
from ethereum._solidity import compile_file
from ethereum.utils import denoms
from pyethapp.rpc_client import JSONRPCClient
from pyethapp.jsonrpc import default_gasprice
from raiden.network.rpc.client import (
decode_topic, patch_send_transaction, patch_send_message
)
from raiden.utils import privatekey_to_address, get_contract_path
from raiden.blockchain.abi import CHANNEL_MANAGER_ABI
solidity = _solidity.get_solidity() # pylint: disable=invalid-name
@pytest.mark.timeout(180)
@pytest.mark.parametrize('privatekey_seed', ['blockchain:{}'])
@pytest.mark.parametrize('number_of_nodes', [3])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_assets', [0])
def test_new_netting_contract(raiden_network, asset_amount, settle_timeout):
# pylint: disable=line-too-long,too-many-statements,too-many-locals
app0, app1, app2 = raiden_network
peer0_address = app0.raiden.address
peer1_address = app1.raiden.address
peer2_address = app2.raiden.address
blockchain_service0 = app0.raiden.chain
asset_address = blockchain_service0.deploy_and_register_asset(
contract_name='HumanStandardToken',
contract_file='HumanStandardToken.sol',
constructor_parameters=(asset_amount, 'raiden', 2, 'Rd'),
)
asset0 = blockchain_service0.asset(asset_address)
for transfer_to in raiden_network[1:]:
asset0.transfer(
privatekey_to_address(transfer_to.raiden.privkey),
asset_amount // len(raiden_network),
)
manager0 = blockchain_service0.manager_by_asset(asset_address)
# sanity
assert manager0.channels_addresses() == []
assert manager0.channels_by_participant(peer0_address) == []
assert manager0.channels_by_participant(peer1_address) == []
assert manager0.channels_by_participant(peer2_address) == []
# create one channel
netting_address_01 = manager0.new_netting_channel(
peer0_address,
peer1_address,
settle_timeout,
)
# check contract state
netting_channel_01 = blockchain_service0.netting_channel(netting_address_01)
assert netting_channel_01.isopen() is False
assert netting_channel_01.partner(peer0_address) == peer1_address
assert netting_channel_01.partner(peer1_address) == peer0_address
# check channels
channel_list = manager0.channels_addresses()
assert sorted(channel_list[0]) == sorted([peer0_address, peer1_address])
assert manager0.channels_by_participant(peer0_address) == [netting_address_01]
assert manager0.channels_by_participant(peer1_address) == [netting_address_01]
assert manager0.channels_by_participant(peer2_address) == []
    # create another channel
netting_address_02 = manager0.new_netting_channel(
peer0_address,
peer2_address,
settle_timeout,
)
netting_channel_02 = blockchain_service0.netting_channel(netting_address_02)
assert netting_channel_02.isopen() is False
assert netting_channel_02.partner(peer0_address) == peer2_address
assert netting_channel_02.partner(peer2_address) == peer0_address
channel_list = manager0.channels_addresses()
expected_channels = [
sorted([peer0_address, peer1_address]),
sorted([peer0_address, peer2_address]),
]
for channel in channel_list:
assert sorted(channel) in expected_channels
result0 = sorted(manager0.channels_by_participant(peer0_address))
result1 = sorted([netting_address_01, netting_address_02])
assert result0 == result1
assert manager0.channels_by_participant(peer1_address) == [netting_address_01]
assert manager0.channels_by_participant(peer2_address) == [netting_address_02]
# deposit without approve should fail
netting_channel_01.deposit(peer0_address, 100)
assert netting_channel_01.isopen() is False
assert netting_channel_02.isopen() is False
assert netting_channel_01.detail(peer0_address)['our_balance'] == 0
assert netting_channel_01.detail(peer1_address)['our_balance'] == 0
# single-funded channel
app0.raiden.chain.asset(asset_address).approve(netting_address_01, 100)
netting_channel_01.deposit(peer0_address, 100)
assert netting_channel_01.isopen() is True
assert netting_channel_02.isopen() is False
assert netting_channel_01.detail(peer0_address)['our_balance'] == 100
assert netting_channel_01.detail(peer1_address)['our_balance'] == 0
# double-funded channel
app0.raiden.chain.asset(asset_address).approve(netting_address_02, 70)
netting_channel_02.deposit(peer0_address, 70)
assert netting_channel_01.isopen() is True
assert netting_channel_02.isopen() is True
assert netting_channel_02.detail(peer0_address)['our_balance'] == 70
assert netting_channel_02.detail(peer2_address)['our_balance'] == 0
app2.raiden.chain.asset(asset_address).approve(netting_address_02, 130)
app2.raiden.chain.netting_channel(netting_address_02).deposit(peer2_address, 130)
assert netting_channel_01.isopen() is True
assert netting_channel_02.isopen() is True
assert netting_channel_02.detail(peer0_address)['our_balance'] == 70
assert netting_channel_02.detail(peer2_address)['our_balance'] == 130
@pytest.mark.skipif(
'TRAVIS' in os.environ,
reason='Flaky test due to mark.timeout not being scheduled. Issue #319'
)
@pytest.mark.timeout(60)
@pytest.mark.parametrize('privatekey_seed', ['blockchain:{}'])
@pytest.mark.parametrize('number_of_nodes', [3])
def test_blockchain(
blockchain_type,
blockchain_backend, # required to start the geth backend
blockchain_rpc_ports,
private_keys,
poll_timeout):
# pylint: disable=too-many-locals
# this test is for interaction with a blockchain using json-rpc, so it
# doesnt make sense to execute it against mock or tester
if blockchain_type not in ('geth',):
return
addresses = [
privatekey_to_address(priv)
for priv in private_keys
]
privatekey = private_keys[0]
address = privatekey_to_address(privatekey)
total_asset = 100
jsonrpc_client = JSONRPCClient(
port=blockchain_rpc_ports[0],
privkey=privatekey,
print_communication=False,
)
patch_send_transaction(jsonrpc_client)
patch_send_message(jsonrpc_client)
humantoken_path = get_contract_path('HumanStandardToken.sol')
humantoken_contracts = compile_file(humantoken_path, libraries=dict())
token_proxy = jsonrpc_client.deploy_solidity_contract(
address,
'HumanStandardToken',
humantoken_contracts,
dict(),
(total_asset, 'raiden', 2, 'Rd'),
contract_path=humantoken_path,
gasprice=default_gasprice,
timeout=poll_timeout,
)
registry_path = get_contract_path('Registry.sol')
registry_contracts = compile_file(registry_path)
registry_proxy = jsonrpc_client.deploy_solidity_contract(
address,
'Registry',
registry_contracts,
dict(),
tuple(),
contract_path=registry_path,
gasprice=default_gasprice,
timeout=poll_timeout,
)
log_list = jsonrpc_client.call(
'eth_getLogs',
{
'fromBlock': '0x0',
'toBlock': 'latest',
'topics': [],
},
)
assert len(log_list) == 0
# pylint: disable=no-member
assert token_proxy.balanceOf(address) == total_asset
transaction_hash = registry_proxy.addAsset.transact(
token_proxy.address,
gasprice=denoms.wei,
)
jsonrpc_client.poll(transaction_hash.decode('hex'), timeout=poll_timeout)
assert len(registry_proxy.assetAddresses.call()) == 1
log_list = jsonrpc_client.call(
'eth_getLogs',
{
'fromBlock': '0x0',
'toBlock': 'latest',
'topics': [],
},
)
assert len(log_list) == 1
channel_manager_address_encoded = registry_proxy.channelManagerByAsset.call(
token_proxy.address,
)
channel_manager_address = channel_manager_address_encoded.decode('hex')
log = log_list[0]
log_topics = [
decode_topic(topic)
for topic in log['topics'] # pylint: disable=invalid-sequence-index
]
log_data = log['data']
event = registry_proxy.translator.decode_event(
log_topics,
log_data[2:].decode('hex'),
)
assert channel_manager_address == event['channel_manager_address'].decode('hex')
assert token_proxy.address == event['asset_address'].decode('hex')
channel_manager_proxy = jsonrpc_client.new_contract_proxy(
CHANNEL_MANAGER_ABI,
channel_manager_address,
)
transaction_hash = channel_manager_proxy.newChannel.transact(
addresses[1],
10,
gasprice=denoms.wei,
)
jsonrpc_client.poll(transaction_hash.decode('hex'), timeout=poll_timeout)
log_list = jsonrpc_client.call(
'eth_getLogs',
{
'fromBlock': '0x0',
'toBlock': 'latest',
'topics': [],
},
)
assert len(log_list) == 2
| 33.647273 | 85 | 0.711985 | [
"MIT"
] | nicksavers/raiden | raiden/tests/integration/test_blockchainservice.py | 9,253 | Python |
# coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
import warnings
from ruamel.yaml.error import MarkedYAMLError, ReusedAnchorWarning
from ruamel.yaml.compat import utf8
from ruamel.yaml.events import (
StreamStartEvent, StreamEndEvent, MappingStartEvent, MappingEndEvent,
SequenceStartEvent, SequenceEndEvent, AliasEvent, ScalarEvent,
)
from ruamel.yaml.nodes import (
MappingNode, ScalarNode, SequenceNode,
)
__all__ = ['Composer', 'ComposerError']
class ComposerError(MarkedYAMLError):
pass
class Composer(object):
def __init__(self):
self.anchors = {}
def check_node(self):
# Drop the STREAM-START event.
if self.check_event(StreamStartEvent):
self.get_event()
# If there are more documents available?
return not self.check_event(StreamEndEvent)
def get_node(self):
# Get the root node of the next document.
if not self.check_event(StreamEndEvent):
return self.compose_document()
def get_single_node(self):
# Drop the STREAM-START event.
self.get_event()
# Compose a document if the stream is not empty.
document = None
if not self.check_event(StreamEndEvent):
document = self.compose_document()
# Ensure that the stream contains no more documents.
if not self.check_event(StreamEndEvent):
event = self.get_event()
raise ComposerError(
"expected a single document in the stream",
document.start_mark, "but found another document",
event.start_mark)
# Drop the STREAM-END event.
self.get_event()
return document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
self.anchors = {}
return node
def compose_node(self, parent, index):
if self.check_event(AliasEvent):
event = self.get_event()
alias = event.anchor
if alias not in self.anchors:
raise ComposerError(
None, None, "found undefined alias %r"
% utf8(alias), event.start_mark)
return self.anchors[alias]
event = self.peek_event()
anchor = event.anchor
if anchor is not None: # have an anchor
if anchor in self.anchors:
# raise ComposerError(
                #     "found duplicate anchor %r; first occurrence"
                #     % utf8(anchor), self.anchors[anchor].start_mark,
                #     "second occurrence", event.start_mark)
                ws = "\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence "\
"{}".format(
(anchor), self.anchors[anchor].start_mark, event.start_mark)
warnings.warn(ws, ReusedAnchorWarning)
self.descend_resolver(parent, index)
if self.check_event(ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.check_event(SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.check_event(MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.ascend_resolver()
return node
def compose_scalar_node(self, anchor):
event = self.get_event()
tag = event.tag
if tag is None or tag == u'!':
tag = self.resolve(ScalarNode, event.value, event.implicit)
node = ScalarNode(tag, event.value,
event.start_mark, event.end_mark, style=event.style,
comment=event.comment)
if anchor is not None:
self.anchors[anchor] = node
return node
def compose_sequence_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(SequenceNode, None, start_event.implicit)
node = SequenceNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style,
comment=start_event.comment, anchor=anchor)
if anchor is not None:
self.anchors[anchor] = node
index = 0
while not self.check_event(SequenceEndEvent):
node.value.append(self.compose_node(node, index))
index += 1
end_event = self.get_event()
if node.flow_style is True and end_event.comment is not None:
if node.comment is not None:
                print('Warning: unexpected end_event comment in sequence '
'node {}'.format(node.flow_style))
node.comment = end_event.comment
node.end_mark = end_event.end_mark
self.check_end_doc_comment(end_event, node)
return node
def compose_mapping_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(MappingNode, None, start_event.implicit)
node = MappingNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style,
comment=start_event.comment, anchor=anchor)
if anchor is not None:
self.anchors[anchor] = node
while not self.check_event(MappingEndEvent):
# key_event = self.peek_event()
item_key = self.compose_node(node, None)
# if item_key in node.value:
# raise ComposerError("while composing a mapping",
# start_event.start_mark,
# "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
# node.value[item_key] = item_value
node.value.append((item_key, item_value))
end_event = self.get_event()
if node.flow_style is True and end_event.comment is not None:
node.comment = end_event.comment
node.end_mark = end_event.end_mark
self.check_end_doc_comment(end_event, node)
return node
def check_end_doc_comment(self, end_event, node):
if end_event.comment and end_event.comment[1]:
# pre comments on an end_event, no following to move to
if node.comment is None:
node.comment = [None, None]
assert not isinstance(node, ScalarEvent)
# this is a post comment on a mapping node, add as third element
# in the list
node.comment.append(end_event.comment[1])
end_event.comment[1] = None
| 37.603261 | 92 | 0.599798 | [
"MIT"
] | mpercich/Calendarize | ios/dateparser/lib/python2.7/site-packages/ruamel/yaml/composer.py | 6,919 | Python |
import argparse
import locale
import sys
from datetime import datetime
from model import *
from sql import *
from common import *
from util import *
def parse_arguments():
'''
Parse input arguments. Passing the API key is defined as mandatory.
'''
parser = argparse.ArgumentParser(description='Incrementally exports JSON orders data into CSV format and optionally into a SQLite DB.')
parser.add_argument('-k', '--key', type=str, required=True, help='API key to be used to perform the REST request to the backend.')
    parser.add_argument('-l', '--locale', type=str, required=False, help='Specify the locale: it_IT for Italian; defaults to en_GB otherwise.')
parser.add_argument('-d', '--db', action='store_true', required=False, help='Instruct the tool to load a SQLite database up.')
parser.add_argument('-p', '--path', type=str, required=True, help='Define datastore base path to csv/ and db/ folders (csv/ and db/ folders should be already created).')
parser.add_argument('-n', '--number', type=int, required=True, help='Define how many records each REST call should pull down.')
    parser.add_argument('-c', '--customer', type=int, required=False, help='Define whether the customer table should be updated contextually: takes the number of pages to fetch (max 50 records per page).')
args = parser.parse_args()
return args
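# Example invocation (illustrative only; the key, path and counts are placeholders):
#   python orders-exporter.py -k YOUR_API_KEY -p /data/orders -n 100 -d
# expects /data/orders/csv/ and /data/orders/db/ to already exist, pulls 100 records per
# REST call, and also loads the SQLite database because of -d.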
def main():
args = parse_arguments()
if args.locale:
locale.setlocale(locale.LC_ALL, args.locale)
else:
locale.setlocale(locale.LC_ALL, 'en_GB')
datastore_path = args.path
nr_records = args.number
if not is_path_existent('%s/%s' % (datastore_path, 'csv')):
sys.exit(1)
if not is_path_existent('%s/%s' % (datastore_path, 'db')):
sys.exit(1)
# load or refresh the customer table for enrichment
if args.customer:
customers = load_customers_pages(args.key, args.customer)
persist_customers_to_sqlite(customers, datastore_path)
# looking up the customers for successive enrichment of orders
lookup = lookup_customers(datastore_path)
orders = load_orders_pages(args.key, nr_records, lookup)
print('info: loaded %d order(s)...' % len(orders))
print(orders[0])
print('info: all records between FIRST and LAST\n')
print(orders[-1])
export_to_csv(orders, datastore_path)
    print('info: CSV export successful: %d order(s)' % len(orders))
if args.db:
export_to_sqlite(orders, datastore_path)
if __name__ == "__main__":
main()
| 39.9375 | 201 | 0.691706 | [
"MIT"
] | hailpam/data-crunching | scripts/orders-exporter.py | 2,556 | Python |
# -*- coding: utf-8 -*-
import QUANTAXIS as QA
from QUANTAXIS.QAFetch import QATusharePro as pro
import pandas as pd
import numpy as np
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark import SparkContext,SparkConf
from pyspark.sql.session import SparkSession
from QUANTAXIS.ML import RegUtil
from pyspark.sql.types import StructType,DoubleType,StructField,StringType
#from pyspark.sql.functions import
import copy
import talib
spark = SparkSession.builder.appName("my app").getOrCreate()
#spark.sparkContext.setLogLevel("INFO")
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
start_3years_bf = '20150101'
industry_daily = pro.QA_fetch_get_industry_daily(start=start_3years_bf, end='20181231').sort_values(['industry','trade_date'], ascending = True)
industry_daily = spark.createDataFrame(industry_daily)
new_struct = ['q_dtprofit_ttm_poly', 'q_gr_poly', 'q_profit_poly', 'q_dtprofit_poly', 'q_opincome_poly', 'industry_roe', 'industry_pe', 'roe_ttm', 'industry_pe_ttm']
p1 = StructType()
p1.add(StructField('trade_date', StringType()))
p1.add(StructField('industry', StringType()))
list(map(lambda x: p1.add(StructField(x, DoubleType())), new_struct))
start = '20180101'
end = '20181231'
@pandas_udf(p1, PandasUDFType.GROUPED_MAP)
def _trend(key,data):
dates = [str(int(start[0:4]) - 3) + '0831',str(int(start[0:4]) - 3) + '1031',
str(int(start[0:4]) - 2) + '0431', str(int(start[0:4]) - 2) + '0831',
str(int(start[0:4]) - 2) + '1031', str(int(start[0:4]) - 1) + '0431',
str(int(start[0:4]) - 1) + '0831', str(int(start[0:4]) - 1) + '1031']
_lam_f = lambda x, y: y[y.trade_date <= x].iloc[-1] if y[y.trade_date <= x].shape[0]>0 else None
resampledf = pd.DataFrame(list(filter(lambda x:x is not None,map(_lam_f, dates,[data]*8))))
col = ['trade_date', 'industry']
col = col+new_struct
indicator = pd.DataFrame(columns=col)
df = data[data.trade_date >= start]
df.reset_index(drop=True)
for index,item in df.iterrows():
if item.trade_date[4:8] <= "0831" and item.trade_date[4:8] > "0431" and item.trade_date[0:4] + '0431' not in dates:
            dates.append(item.trade_date[0:4] + '0431')  # append the date string (not a one-element list) so the "not in dates" check works
t = list(filter(lambda x:x is not None,map(_lam_f, [item.trade_date[0:4] + '0431'],[data])))
if t is not None:
resampledf = resampledf.append(t)
if item.trade_date[4:8] <= "1031" and item.trade_date[4:8] > "0831" and item.trade_date[0:4] + '0831' not in dates:
            dates.append(item.trade_date[0:4] + '0831')  # same fix: append the string, not a one-element list
t = list(filter(lambda x: x is not None, map(_lam_f, [item.trade_date[0:4] + '0831'], [data])))
if t is not None:
resampledf = resampledf.append(t)
if item.trade_date[4:8] > "1031" and item.trade_date[0:4] + '1031' not in dates:
            dates.append(item.trade_date[0:4] + '1031')  # same fix: append the string, not a one-element list
t = list(filter(lambda x: x is not None, map(_lam_f, [item.trade_date[0:4] + '1031'], [data])))
if t is not None:
resampledf = resampledf.append(t)
resample = resampledf.append(list(map(_lam_f, [item.trade_date], [data])))
resample = resample.dropna(how='all')
ind = -8 if resample.shape[0]>8 else -resample.shape[0]
fit, p3 = RegUtil.regress_y_polynomial(resample[ind:].q_dtprofit_ttm, poly=3, show=False)
# fit, p4 = RegUtil.regress_y_polynomial(resample[-8:].q_opincome_ttm, poly=3, show=False)
fit, p5 = RegUtil.regress_y_polynomial(resample[ind:].q_gr, poly=3, show=False)
fit, p6 = RegUtil.regress_y_polynomial(resample[ind:].q_profit, poly=3, show=False)
fit, p7 = RegUtil.regress_y_polynomial(resample[ind:].q_dtprofit, poly=3, show=False)
fit, p8 = RegUtil.regress_y_polynomial(resample[ind:].q_opincome, poly=3, show=False)
roe = item.q_dtprofit / item.total_hldr_eqy_exc_min_int
pe = item.ind_total_mv*10000/item.q_dtprofit
roe_ttm = item.q_dtprofit_ttm / item.total_hldr_eqy_exc_min_int
pe_ttm = item.ind_total_mv*10000/item.q_dtprofit_ttm
indicator.loc[index] = [item.trade_date,key[0],p3(8),p5(8),p6(8),p7(8),p8(8),roe,pe,roe_ttm,pe_ttm]
#print(indicator.loc[index])
return indicator
industry_daily = industry_daily.groupby("industry").apply(_trend).cache()
stock = pro.QA_SU_stock_info()
stock_spark = spark.createDataFrame(stock)
basic = pd.read_csv('/usr/local/spark/basic-2018.csv')
basic = spark.createDataFrame(basic)
#df = basic.join(stock_spark, basic.ts_code==stock_spark.ts_code, "inner")
df = basic.join(stock_spark,['ts_code'],"inner")
#industry_daily.count()
df = df.join(industry_daily,['industry', 'trade_date'],"inner")
new2_struct = [ 'cnt', 'mean', 'std', 'min', 'per25', 'per50', 'per75', 'per85', 'per95', 'max']
p2 = StructType()
p2.add(StructField('category', StringType()))
p2.add(StructField('industry', StringType()))
list(map(lambda x: p2.add(StructField(x, DoubleType())), new2_struct))
@pandas_udf(p2, PandasUDFType.GROUPED_MAP)
def _dailystat(key,df):
d = df.loc[:, ['q_dtprofit_ttm_poly','q_gr_poly','q_profit_poly','q_dtprofit_poly','q_opincome_poly','industry_roe','industry_pe','roe_ttm','industry_pe_ttm']]
st = d.describe([.25, .5, .75, .85, .95]).T.reset_index(level=0)
col = ['category']
col = col+new2_struct
st.columns = col
st.loc[:,'industry'] = key[0]
median = d.median()
mad = abs(d - median).median()
d[d - (median - mad * 3 * 1.4826) < 0] = np.array((median - mad * 3 * 1.4826).tolist()*d.shape[0]).reshape((d.shape[0],d.columns.size))
d[d - (median + mad * 3 * 1.4826) > 0] = np.array((median + mad * 3 * 1.4826).tolist()*d.shape[0]).reshape((d.shape[0],d.columns.size))
st2 = d.describe([.25, .5, .85, .90, .95]).T.reset_index(level=0)
st2.columns = col
st2.loc[:,'industry'] = key[0]
st2.category = st2.category+'_mad'
return pd.concat([st, st2])
dailymarket = industry_daily.groupby('trade_date').apply(_dailystat).toPandas()
#
add3_struct = ['industry_roe_buy', 'industry_pe_buy', 'q_dtprofit_poly_buy', 'industry_roe_ttm_buy', 'industry_pe_ttm_buy', 'q_dtprofit_ttm_poly_buy', 'industry_roe_buy_mad', 'industry_pe_buy_mad', 'q_dtprofit_poly_buy_mad', 'industry_roe_ttm_buy_mad', 'industry_pe_ttm_buy_mad', 'q_dtprofit_ttm_poly_buy_mad']
p3 = copy.deepcopy(df.schema)
list(map(lambda x: p3.add(StructField(x, DoubleType())), add3_struct))
p3.add(StructField('key_flag', StringType()))
k = 0
d = []
ud = []
#print(p3)
#print(df.columns)
@pandas_udf(p3, PandasUDFType.GROUPED_MAP)
def _top10(key,df2):
global dailymarket
global k
df = pd.concat([df2, pd.DataFrame(columns=add3_struct, dtype='float')])
market = dailymarket[dailymarket.industry == key[0]]
ud.append(key[0])
#print(market)
df.loc[:,'key_flag'] = key[0]
if market.shape[0]:
df.loc[:, 'industry_roe_buy'] = df.industry_roe - market[market.category == 'industry_roe'].per90[0]
df.loc[:, 'industry_pe_buy'] = df.industry_pe - market[market.category == 'industry_pe'].per85[0]
df.loc[:, 'q_dtprofit_poly_buy'] = df.q_dtprofit_poly - market[market.category == 'q_dtprofit_poly'].per85[0]
df.loc[:, 'industry_roe_ttm_buy'] = df.roe_ttm - market[market.category == 'roe_ttm'].per90[0]
df.loc[:, 'industry_pe_ttm_buy'] = df.industry_pe_ttm - market[market.category == 'industry_pe_ttm'].per85[0]
df.loc[:, 'q_dtprofit_ttm_poly_buy'] = df.q_dtprofit_ttm_poly - market[market.category == 'q_dtprofit_ttm_poly'].per85[0]
df.loc[:, 'industry_roe_buy_mad'] = df.industry_roe - market[market.category == 'industry_roe_mad'].per90[0]
df.loc[:, 'industry_pe_buy_mad'] = df.industry_pe - market[market.category == 'industry_pe_mad'].per85[0]
df.loc[:, 'q_dtprofit_poly_buy_mad'] = df.q_dtprofit_poly - market[market.category == 'q_dtprofit_poly_mad'].per85[0]
df.loc[:, 'industry_roe_ttm_buy_mad'] = df.roe_ttm - market[market.category == 'roe_ttm_mad'].per90[0]
df.loc[:, 'industry_pe_ttm_buy_mad'] = df.industry_pe_ttm - market[market.category == 'industry_pe_ttm_mad'].per85[0]
df.loc[:, 'q_dtprofit_ttm_poly_buy_mad'] = df.q_dtprofit_ttm_poly - market[market.category == 'q_dtprofit_ttm_poly_mad'].per85[0]
else:
k = k+1
d.append(key[0])
return df
rs = df.groupby('trade_date').apply(_top10).toPandas().set_index(['trade_date', 'ts_code'], drop=False)
print('############rs key flag ############')
print(rs.key_flag.unique())
print('############rs total count ############')
print(len(rs))
print('############ mised key ############')
print(k)
print('############ first 5 key ############')
print(ud[0:5])
#print(rs.head)
#
# if __name__ == '__main__':
# print('wtf')
# # finacial = pd.read_csv('/usr/local/spark/finace-2018.csv')
# # basic = pd.read_csv('/usr/local/spark/basic-2018.csv')
#
# #df = spark.createDataFrame(basic.loc[:,['ts_code','trade_date']])
# sv = simpleValued('20180101','20181231')
# df = sv.non_finacal_top5_valued()
# df1 = sv.industry_trend_top10(df)
# df1.toPandas().set_index(['trade_date', 'ts_code'], drop=False) | 55.97561 | 310 | 0.666993 | [
"MIT"
] | lkaiser/QUANTAXIS | EXAMPLE/test_backtest/example/indicator/simple_valued_spark2.py | 9,180 | Python |
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import pytest
from datadog_checks.postgres.relationsmanager import ALL_SCHEMAS, IDX_METRICS, LOCK_METRICS, RelationsManager
from .common import SCHEMA_NAME
pytestmark = pytest.mark.unit
@pytest.mark.parametrize(
'relations_config,expected_filter',
[
(
[
{'relation_regex': 'ix.*', 'schemas': ['public', 's1', 's2']},
{'relation_regex': 'ibx.*', 'schemas': ['public']},
{'relation_regex': 'icx.*', 'schemas': ['public']},
],
"( relname ~ 'ix.*' AND schemaname = ANY(array['public','s1','s2']::text[]) ) "
"OR ( relname ~ 'ibx.*' AND schemaname = ANY(array['public']::text[]) ) "
"OR ( relname ~ 'icx.*' AND schemaname = ANY(array['public']::text[]) )",
),
(
[
{'relation_regex': '.+_archive'},
],
"( relname ~ '.+_archive' )",
),
(
[
{'relation_name': 'my_table', 'schemas': ['public', 'app'], 'relkind': ['r']}, # relkind ignored
{'relation_name': 'my_table2', 'relkind': ['p', 'r']}, # relkind ignored
{'relation_regex': 'table.*'},
],
"( relname = 'my_table' AND schemaname = ANY(array['public','app']::text[]) ) "
"OR ( relname = 'my_table2' ) "
"OR ( relname ~ 'table.*' )",
),
(
['table1', 'table2'],
"( relname = 'table1' ) OR ( relname = 'table2' )",
),
],
)
def test_relations_cases(relations_config, expected_filter):
query = '{relations}'
relations = RelationsManager(relations_config)
query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
assert query_filter == expected_filter
def test_relation_filter():
query = "Select foo from bar where {relations}"
relations_config = [{'relation_name': 'breed', 'schemas': ['public']}]
relations = RelationsManager(relations_config)
query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
assert (
query_filter == "Select foo from bar where ( relname = 'breed' AND schemaname = ANY(array['public']::text[]) )"
)
def test_relation_filter_no_schemas():
query = "Select foo from bar where {relations}"
relations_config = [{'relation_name': 'persons', 'schemas': [ALL_SCHEMAS]}]
relations = RelationsManager(relations_config)
query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
assert query_filter == "Select foo from bar where ( relname = 'persons' )"
def test_relation_filter_regex():
query = "Select foo from bar where {relations}"
relations_config = [{'relation_regex': 'b.*', 'schemas': [ALL_SCHEMAS]}]
relations = RelationsManager(relations_config)
query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
assert query_filter == "Select foo from bar where ( relname ~ 'b.*' )"
def test_relation_filter_relkind():
query = LOCK_METRICS['query'].replace('{metrics_columns}', 'foo')
relations_config = [{'relation_regex': 'b.*', 'schemas': [ALL_SCHEMAS], 'relkind': ['r', 't']}]
relations = RelationsManager(relations_config)
query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
assert "AND relkind = ANY(array['r','t'])" in query_filter
def test_relkind_does_not_apply_to_index_metrics():
query = IDX_METRICS['query'].replace('{metrics_columns}', 'foo')
relations_config = [{'relation_regex': 'b.*', 'schemas': [ALL_SCHEMAS], 'relkind': ['r']}]
relations = RelationsManager(relations_config)
query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
assert 'relkind' not in query_filter
| 38.35 | 119 | 0.618774 | [
"BSD-3-Clause"
] | Kyle-Neale/integrations-core | postgres/tests/test_relationsmanager.py | 3,835 | Python |
"""Common classes and elements for Omnilogic Integration."""
from datetime import timedelta
import logging
from omnilogic import OmniLogicException
from homeassistant.const import ATTR_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ALL_ITEM_KINDS,
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
class OmniLogicUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching update data from single endpoint."""
def __init__(
self,
hass: HomeAssistant,
api: str,
name: str,
polling_interval: int,
):
"""Initialize the global Omnilogic data updater."""
self.api = api
super().__init__(
hass=hass,
logger=_LOGGER,
name=name,
update_interval=timedelta(seconds=polling_interval),
)
async def _async_update_data(self):
"""Fetch data from OmniLogic."""
try:
data = await self.api.get_telemetry_data()
except OmniLogicException as error:
raise UpdateFailed(f"Error updating from OmniLogic: {error}") from error
parsed_data = {}
def get_item_data(item, item_kind, current_id, data):
"""Get data per kind of Omnilogic API item."""
if isinstance(item, list):
for single_item in item:
data = get_item_data(single_item, item_kind, current_id, data)
if "systemId" in item:
system_id = item["systemId"]
current_id = current_id + (item_kind, system_id)
data[current_id] = item
for kind in ALL_ITEM_KINDS:
if kind in item:
data = get_item_data(item[kind], kind, current_id, data)
return data
parsed_data = get_item_data(data, "Backyard", (), parsed_data)
return parsed_data
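# Illustrative shape of the parsed telemetry (the kinds and system ids below are invented
# for demonstration): get_item_data flattens the nested OmniLogic payload into a dict keyed
# by alternating (kind, systemId) tuples, e.g.
#   {("Backyard", 1): {...},
#    ("Backyard", 1, "BOWS", 7): {...},
#    ("Backyard", 1, "BOWS", 7, "Filter", 12): {...}}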
class OmniLogicEntity(CoordinatorEntity):
"""Defines the base OmniLogic entity."""
def __init__(
self,
coordinator: OmniLogicUpdateCoordinator,
kind: str,
name: str,
item_id: tuple,
icon: str,
):
"""Initialize the OmniLogic Entity."""
super().__init__(coordinator)
bow_id = None
entity_data = coordinator.data[item_id]
backyard_id = item_id[:2]
if len(item_id) == 6:
bow_id = item_id[:4]
msp_system_id = coordinator.data[backyard_id]["systemId"]
entity_friendly_name = f"{coordinator.data[backyard_id]['BackyardName']} "
unique_id = f"{msp_system_id}"
if bow_id is not None:
unique_id = f"{unique_id}_{coordinator.data[bow_id]['systemId']}"
entity_friendly_name = (
f"{entity_friendly_name}{coordinator.data[bow_id]['Name']} "
)
unique_id = f"{unique_id}_{coordinator.data[item_id]['systemId']}_{kind}"
if entity_data.get("Name") is not None:
entity_friendly_name = f"{entity_friendly_name} {entity_data['Name']}"
entity_friendly_name = f"{entity_friendly_name} {name}"
unique_id = unique_id.replace(" ", "_")
self._kind = kind
self._name = entity_friendly_name
self._unique_id = unique_id
self._item_id = item_id
self._icon = icon
self._attrs = {}
self._msp_system_id = msp_system_id
self._backyard_name = coordinator.data[backyard_id]["BackyardName"]
@property
def unique_id(self) -> str:
"""Return a unique, Home Assistant friendly identifier for this entity."""
return self._unique_id
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def icon(self):
"""Return the icon for the entity."""
return self._icon
@property
def device_state_attributes(self):
"""Return the attributes."""
return self._attrs
@property
def device_info(self):
"""Define the device as back yard/MSP System."""
return {
ATTR_IDENTIFIERS: {(DOMAIN, self._msp_system_id)},
ATTR_NAME: self._backyard_name,
ATTR_MANUFACTURER: "Hayward",
ATTR_MODEL: "OmniLogic",
}
| 28.386076 | 84 | 0.611371 | [
"Apache-2.0"
] | 123dev/core | homeassistant/components/omnilogic/common.py | 4,485 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-20 21:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('librarian', '0108_dataset_is_external_missing'),
]
operations = [
migrations.AddField(
model_name='dataset',
name='is_uploaded',
field=models.BooleanField(default=True, help_text='True if the file was uploaded, not an output.'),
),
]
| 25.142857 | 111 | 0.643939 | [
"BSD-3-Clause"
] | cfe-lab/Kive | kive/librarian/migrations/0109_dataset_is_uploaded.py | 528 | Python |
"""
MODULE : code_generation.py
Purpose : * Class for parsing the text and launching the basic block algorithm.
Also houses the code generation algorithm
"""
# List of Imports Begin
import debug as DEBUG
import instr3ac as INSTRUCTION
import basic_blocks as BB
import mips_assembly as ASM
import global_objects as G
import library as LIB
import shlex
import re
# List of Imports End
def LexerIR(inpString):
return re.findall(r'(?:[^\s,"]|"(?:\\.|[^"])*")+', inpString)
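# Illustrative behaviour of LexerIR (the input line is made up): tokens split on whitespace
# and commas, while double-quoted strings are kept intact, e.g.
#   LexerIR('print "hello, world" , x')  ->  ['print', '"hello, world"', 'x']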
class CodeGenerator(object):
"""
This class houses the basic-block generation and code-generation algorithm
Member Variables:
* instructions : Stores all the program instructions
* basicBlocks : Stores all the basic blocks
* targets : Which line IDs are goto targets. Used for basic block algorithm
"""
def __init__(self, text, fileName, symTabManager, funcActRecords):
text = text.split('\n')
text = [i.lstrip().rstrip() for i in text if i != '']
text = [i.replace('\t', '') for i in text]
self.instructions = []
self.basicBlocks = []
self.targets = set([])
self.allLineIDs = set([])
self.fileName = fileName
# Build Global Objects
G.AsmText = ASM.TextRegion(fileName)
G.AsmData = ASM.DataRegion(funcActRecords, symTabManager)
LIB.AddEssentialLibraries()
# Create an instance of the instruction class for each line
for line in text:
instrTuple = LexerIR(line)
if instrTuple == []:
continue
instrTuple = [i.lstrip().rstrip() for i in instrTuple]
instr = INSTRUCTION.Instr3AC(instrTuple)
# Disallow multiple input lines with same lineID
            DEBUG.Assert(instr.lineID not in self.allLineIDs, "Multiple lines with same line ID.")
self.allLineIDs.add(instr.lineID)
self.instructions += [instr]
# print self.instructions[-1]
gotoTarget = self.instructions[-1].GetTarget()
if gotoTarget:
self.targets.add(gotoTarget)
# Identify the branch targets and set their isTarget value to true
for instr in self.instructions:
if "$LID_" + str(instr.lineID) in self.targets:
instr.isTarget = True
def GenBasicBlocks(self):
""" Generate basic blocks using the algorithm """
if len(self.instructions) == 0:
return
bbCount = 0
self.basicBlocks = [BB.BasicBlock()]
# First statement is a leader
self.basicBlocks[-1].AddInstruction(self.instructions[0])
for instr in self.instructions[1:]:
# Is the current instruction a branch target? If yes, it's a leader
if instr.IsTarget():
bbCount += 1
self.basicBlocks += [BB.BasicBlock(bbCount)]
self.basicBlocks[-1].AddInstruction(instr)
# Is next statement a leader?
if instr.instrType.is_JMP():
bbCount += 1
self.basicBlocks += [BB.BasicBlock(bbCount)]
self.basicBlocks = [bb for bb in self.basicBlocks if not bb.IsEmpty()]
for i, bb in enumerate(self.basicBlocks):
bb.bbNum = i
def PrintBasicBlocks(self):
for bb in self.basicBlocks:
bb.PrettyPrint()
def BuildCode(self):
filePtr = open(self.fileName.replace('.ir', '.s'), 'w+')
for bb in self.basicBlocks:
bb.Translate()
G.AsmText.WriteHeader(filePtr)
G.AsmData.GenerateDataRegion(filePtr)
G.AsmText.WriteToFile(filePtr)
filePtr.close()
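# Minimal usage sketch (the variable names are assumptions based on the constructor
# signature above, not taken verbatim from the rest of the project):
#   cg = CodeGenerator(irText, "program.ir", symTabManager, funcActRecords)
#   cg.GenBasicBlocks()
#   cg.PrintBasicBlocks()   # optional: inspect the generated basic blocks
#   cg.BuildCode()          # writes the MIPS assembly to program.s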
| 30.096 | 97 | 0.597023 | [
"MIT"
] | vaishious/comperler | project/src/codegen/code_generation.py | 3,762 | Python |
# terrascript/provider/alertmixer/amixr.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:11:40 UTC)
import terrascript
class amixr(terrascript.Provider):
""""""
__description__ = ""
__namespace__ = "alertmixer"
__name__ = "amixr"
__source__ = "https://github.com/alertmixer/terraform-provider-amixr"
__version__ = "0.2.3"
__published__ = "2021-01-11T16:21:02Z"
__tier__ = "partner"
__all__ = ["amixr"]
| 22.85 | 73 | 0.691466 | [
"BSD-2-Clause"
] | mjuenema/python-terrascript | terrascript/provider/alertmixer/amixr.py | 457 | Python |
import pytest
from django.test import RequestFactory
from tslhub_accounts_api.users.api.views import UserViewSet
from tslhub_accounts_api.users.models import User
pytestmark = pytest.mark.django_db
class TestUserViewSet:
def test_get_queryset(self, user: User, rf: RequestFactory):
view = UserViewSet()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert user in view.get_queryset()
def test_me(self, user: User, rf: RequestFactory):
view = UserViewSet()
request = rf.get("/fake-url/")
request.user = user
view.request = request
response = view.me(request)
assert response.data == {
"username": user.username,
"name": user.name,
"url": f"http://testserver/api/users/{user.username}/",
}
| 25.470588 | 67 | 0.633949 | [
"MIT"
] | vvvti/tslhub-accounts-api | tslhub_accounts_api/users/tests/test_drf_views.py | 866 | Python |
#!/usr/bin/env python
# not used in this project.
import sys
sys.path.append('../gen-py')
from EyePi.ttypes import EyePiInput
from EyePi.ttypes import ConfirmInput
from GenericStruct.ttypes import ActionEnum
from WeatherPi.ttypes import WeatherInput
from ConnectionHelpers.DeviceRegistrator import DeviceRegistrator
from ConnectionHelpers.ConnectEyePi import ConnectEyePi
from thrift import Thrift
import cv2
import os.path
import random # test
import numpy as np
import pickle
sys.path.append('../../')
import config
### test
def read_image():
    root, dirs, files = next(os.walk(config.file_path))
    imageCollection = list(filter(lambda filename: filename.endswith('.jpg'), files))
    imageCollection += list(filter(lambda filename: filename.endswith('.png'), files))
return random.choice(imageCollection)
### end test
try:
## mock! ###
    # Normally a device would properly register itself and keep the token.
    # But in the development case, the cache is reset every time, so this mock registers the device.
device_token = DeviceRegistrator().register_device()
### end mock ###
input = EyePiInput()
    filename = config.file_path + read_image()
print('image == '+filename)
file = open(filename, 'rb')
readfile = file.read()
    nparr = np.frombuffer(readfile, np.uint8)  # frombuffer replaces the deprecated binary-mode fromstring
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
input.image = pickle.dumps(obj=image, protocol=None, fix_imports=False)
actions = dict()
weather_input = WeatherInput()
weather_input.location = 'Amsterdam,nl'
actionParameter = pickle.dumps(obj=weather_input, protocol=None, fix_imports=False)
actions[ActionEnum.WEATHER] = actionParameter
input.action = actions
#parameter = GenericObject()
#parameter.stringValue = "%s" % 'Amsterdam,nl'
input.deviceToken = device_token
#input.action = ActionEnum.WEATHER
#input.actionParameters = parameter
output = ConnectEyePi().handleRequest(input)
print(output)
if output.ok:
for face in output.personCollection:
confirm_input = ConfirmInput()
confirm_input.image = face.image
confirm_input.person = face.person
ConnectEyePi().confimFace(confirm_input)
except Thrift.TException as tx:
print("%s" % (tx.message))
| 30.493333 | 96 | 0.716659 | [
"Apache-2.0"
] | emschimmel/BrainPi | 1IntegrationTests/py-impl/PythonEyePiClient.py | 2,287 | Python |
import json
from copy import deepcopy
from time import time
from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from .....core.in_memory import InMemoryProfile
from .....indy.holder import IndyHolder
from .....indy.sdk.holder import IndySdkHolder
from .....indy.issuer import IndyIssuer
from .....ledger.base import BaseLedger
from .....messaging.decorators.attach_decorator import AttachDecorator
from .....messaging.request_context import RequestContext
from .....messaging.responder import BaseResponder, MockResponder
from .....storage.error import StorageNotFoundError
from .....indy.verifier import IndyVerifier
from .....indy.sdk.verifier import IndySdkVerifier
from ....didcomm_prefix import DIDCommPrefix
from ...indy.xform import indy_proof_req_preview2indy_requested_creds
from .. import manager as test_module
from ..manager import V20PresManager, V20PresManagerError
from ..message_types import (
ATTACHMENT_FORMAT,
PRES_20_PROPOSAL,
PRES_20_REQUEST,
PRES_20,
)
from ..messages.pres import V20Pres
from ..messages.pres_ack import V20PresAck
from ..messages.pres_format import V20PresFormat
from ..messages.pres_proposal import V20PresProposal
from ..messages.pres_request import V20PresRequest
from ..models.pres_exchange import V20PresExRecord
CONN_ID = "connection_id"
ISSUER_DID = "NcYxiDXkpYi6ov5FcYDi1e"
S_ID = f"{ISSUER_DID}:2:vidya:1.0"
CD_ID = f"{ISSUER_DID}:3:CL:{S_ID}:tag1"
RR_ID = f"{ISSUER_DID}:4:{CD_ID}:CL_ACCUM:0"
PROOF_REQ_NAME = "name"
PROOF_REQ_VERSION = "1.0"
PROOF_REQ_NONCE = "12345"
NOW = int(time())
INDY_PROOF_REQ_NAME = {
"name": PROOF_REQ_NAME,
"version": PROOF_REQ_VERSION,
"nonce": PROOF_REQ_NONCE,
"requested_attributes": {
"0_player_uuid": {
"name": "player",
"restrictions": [{"cred_def_id": CD_ID}],
"non_revoked": {"from": NOW, "to": NOW},
},
"1_screencapture_uuid": {
"name": "screenCapture",
"restrictions": [{"cred_def_id": CD_ID}],
"non_revoked": {"from": NOW, "to": NOW},
},
},
"requested_predicates": {
"0_highscore_GE_uuid": {
"name": "highScore",
"p_type": ">=",
"p_value": 1000000,
"restrictions": [{"cred_def_id": CD_ID}],
"non_revoked": {"from": NOW, "to": NOW},
}
},
}
INDY_PROOF_REQ_NAMES = {
"name": PROOF_REQ_NAME,
"version": PROOF_REQ_VERSION,
"nonce": PROOF_REQ_NONCE,
"requested_attributes": {
"0_player_uuid": {
"names": ["player", "screenCapture"],
"restrictions": [{"cred_def_id": CD_ID}],
"non_revoked": {"from": NOW, "to": NOW},
}
},
"requested_predicates": {
"0_highscore_GE_uuid": {
"name": "highScore",
"p_type": ">=",
"p_value": 1000000,
"restrictions": [{"cred_def_id": CD_ID}],
"non_revoked": {"from": NOW, "to": NOW},
}
},
}
INDY_PROOF_REQ_SELFIE = {
"name": PROOF_REQ_NAME,
"version": PROOF_REQ_VERSION,
"nonce": PROOF_REQ_NONCE,
"requested_attributes": {
"self_player_uuid": {"name": "player"},
"self_screencapture_uuid": {"name": "screenCapture"},
},
"requested_predicates": {
"0_highscore_GE_uuid": {"name": "highScore", "p_type": ">=", "p_value": 1000000}
},
}
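# The three proof-request fixtures above cover the request shapes exercised by
# the tests below: INDY_PROOF_REQ_NAME restricts individual attributes to a cred
# def, INDY_PROOF_REQ_NAMES groups attributes under a single "names" referent,
# and INDY_PROOF_REQ_SELFIE carries no restrictions, so its attributes are
# self-attested.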
class TestV20PresManager(AsyncTestCase):
async def setUp(self):
self.profile = InMemoryProfile.test_profile()
injector = self.profile.context.injector
Ledger = async_mock.MagicMock(BaseLedger, autospec=True)
self.ledger = Ledger()
self.ledger.get_schema = async_mock.CoroutineMock(
return_value=async_mock.MagicMock()
)
self.ledger.get_credential_definition = async_mock.CoroutineMock(
return_value={"value": {"revocation": {"...": "..."}}}
)
self.ledger.get_revoc_reg_def = async_mock.CoroutineMock(
return_value={
"ver": "1.0",
"id": RR_ID,
"revocDefType": "CL_ACCUM",
"tag": RR_ID.split(":")[-1],
"credDefId": CD_ID,
"value": {
"IssuanceType": "ISSUANCE_BY_DEFAULT",
"maxCredNum": 1000,
"publicKeys": {"accumKey": {"z": "1 ..."}},
"tailsHash": "3MLjUFQz9x9n5u9rFu8Ba9C5bo4HNFjkPNc54jZPSNaZ",
"tailsLocation": "http://sample.ca/path",
},
}
)
self.ledger.get_revoc_reg_delta = async_mock.CoroutineMock(
return_value=(
{
"ver": "1.0",
"value": {"prevAccum": "1 ...", "accum": "21 ...", "issued": [1]},
},
NOW,
)
)
self.ledger.get_revoc_reg_entry = async_mock.CoroutineMock(
return_value=(
{
"ver": "1.0",
"value": {"prevAccum": "1 ...", "accum": "21 ...", "issued": [1]},
},
NOW,
)
)
injector.bind_instance(BaseLedger, self.ledger)
Holder = async_mock.MagicMock(IndyHolder, autospec=True)
self.holder = Holder()
get_creds = async_mock.CoroutineMock(
return_value=(
{
"cred_info": {
"referent": "dummy_reft",
"attrs": {
"player": "Richie Knucklez",
"screenCapture": "aW1hZ2luZSBhIHNjcmVlbiBjYXB0dXJl",
"highScore": "1234560",
},
}
}, # leave this comma: return a tuple
)
)
self.holder.get_credentials_for_presentation_request_by_referent = get_creds
self.holder.get_credential = async_mock.CoroutineMock(
return_value=json.dumps(
{
"schema_id": S_ID,
"cred_def_id": CD_ID,
"rev_reg_id": RR_ID,
"cred_rev_id": 1,
}
)
)
self.holder.create_presentation = async_mock.CoroutineMock(return_value="{}")
self.holder.create_revocation_state = async_mock.CoroutineMock(
return_value=json.dumps(
{
"witness": {"omega": "1 ..."},
"rev_reg": {"accum": "21 ..."},
"timestamp": NOW,
}
)
)
injector.bind_instance(IndyHolder, self.holder)
Verifier = async_mock.MagicMock(IndyVerifier, autospec=True)
self.verifier = Verifier()
self.verifier.verify_presentation = async_mock.CoroutineMock(
return_value="true"
)
injector.bind_instance(IndyVerifier, self.verifier)
self.manager = V20PresManager(self.profile)
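    # The mocks bound in setUp (ledger, holder, verifier) let each test below
    # drive V20PresManager without a real wallet or ledger backend.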
async def test_record_eq(self):
same = [
V20PresExRecord(
pres_ex_id="dummy-0",
thread_id="thread-0",
role=V20PresExRecord.ROLE_PROVER,
)
] * 2
diff = [
V20PresExRecord(
pres_ex_id="dummy-1",
role=V20PresExRecord.ROLE_PROVER,
),
V20PresExRecord(
pres_ex_id="dummy-0",
thread_id="thread-1",
role=V20PresExRecord.ROLE_PROVER,
),
V20PresExRecord(
pres_ex_id="dummy-1",
thread_id="thread-0",
role=V20PresExRecord.ROLE_VERIFIER,
),
]
for i in range(len(same) - 1):
for j in range(i, len(same)):
assert same[i] == same[j]
for i in range(len(diff) - 1):
for j in range(i, len(diff)):
assert diff[i] == diff[j] if i == j else diff[i] != diff[j]
async def test_create_exchange_for_proposal(self):
proposal = V20PresProposal()
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
V20PresProposal, "serialize", autospec=True
):
px_rec = await self.manager.create_exchange_for_proposal(
CONN_ID, proposal, auto_present=None
)
save_ex.assert_called_once()
assert px_rec.thread_id == proposal._thread_id
assert px_rec.initiator == V20PresExRecord.INITIATOR_SELF
assert px_rec.role == V20PresExRecord.ROLE_PROVER
assert px_rec.state == V20PresExRecord.STATE_PROPOSAL_SENT
async def test_receive_proposal(self):
connection_record = async_mock.MagicMock(connection_id=CONN_ID)
proposal = V20PresProposal()
with async_mock.patch.object(V20PresExRecord, "save", autospec=True) as save_ex:
px_rec = await self.manager.receive_pres_proposal(
proposal,
connection_record,
)
save_ex.assert_called_once()
assert px_rec.state == V20PresExRecord.STATE_PROPOSAL_RECEIVED
async def test_create_bound_request(self):
comment = "comment"
proposal = V20PresProposal(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_PROPOSAL][
V20PresFormat.Format.INDY.api
],
)
],
proposals_attach=[
AttachDecorator.data_base64(INDY_PROOF_REQ_NAME, ident="indy")
],
)
px_rec = V20PresExRecord(
pres_proposal=proposal.serialize(),
role=V20PresExRecord.ROLE_VERIFIER,
)
px_rec.save = async_mock.CoroutineMock()
(ret_px_rec, pres_req_msg) = await self.manager.create_bound_request(
pres_ex_record=px_rec,
name=PROOF_REQ_NAME,
version=PROOF_REQ_VERSION,
nonce=PROOF_REQ_NONCE,
comment=comment,
)
assert ret_px_rec is px_rec
px_rec.save.assert_called_once()
async def test_create_exchange_for_request(self):
request = async_mock.MagicMock()
request.indy_proof_request = async_mock.MagicMock()
request._thread_id = "dummy"
with async_mock.patch.object(V20PresExRecord, "save", autospec=True) as save_ex:
px_rec = await self.manager.create_exchange_for_request(CONN_ID, request)
save_ex.assert_called_once()
assert px_rec.thread_id == request._thread_id
assert px_rec.initiator == V20PresExRecord.INITIATOR_SELF
assert px_rec.role == V20PresExRecord.ROLE_VERIFIER
assert px_rec.state == V20PresExRecord.STATE_REQUEST_SENT
async def test_receive_pres_request(self):
px_rec_in = V20PresExRecord()
with async_mock.patch.object(V20PresExRecord, "save", autospec=True) as save_ex:
px_rec_out = await self.manager.receive_pres_request(px_rec_in)
save_ex.assert_called_once()
assert px_rec_out.state == V20PresExRecord.STATE_REQUEST_RECEIVED
async def test_create_pres(self):
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(INDY_PROOF_REQ_NAME, ident="indy")
],
)
px_rec_in = V20PresExRecord(pres_request=pres_request.serialize())
more_magic_rr = async_mock.MagicMock(
get_or_fetch_local_tails_path=async_mock.CoroutineMock(
return_value="/tmp/sample/tails/path"
)
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_decorator, async_mock.patch.object(
test_module, "RevocationRegistry", autospec=True
) as mock_rr:
mock_rr.from_definition = async_mock.MagicMock(return_value=more_magic_rr)
mock_attach_decorator.data_base64 = async_mock.MagicMock(
return_value=mock_attach_decorator
)
req_creds = await indy_proof_req_preview2indy_requested_creds(
INDY_PROOF_REQ_NAME, preview=None, holder=self.holder
)
assert not req_creds["self_attested_attributes"]
assert len(req_creds["requested_attributes"]) == 2
assert len(req_creds["requested_predicates"]) == 1
(px_rec_out, pres_msg) = await self.manager.create_pres(
px_rec_in, req_creds
)
save_ex.assert_called_once()
assert px_rec_out.state == V20PresExRecord.STATE_PRESENTATION_SENT
async def test_create_pres_proof_req_non_revoc_interval_none(self):
indy_proof_req_vcx = deepcopy(INDY_PROOF_REQ_NAME)
indy_proof_req_vcx["non_revoked"] = None # simulate interop with indy-vcx
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(indy_proof_req_vcx, ident="indy")
],
)
px_rec_in = V20PresExRecord(pres_request=pres_request.serialize())
more_magic_rr = async_mock.MagicMock(
get_or_fetch_local_tails_path=async_mock.CoroutineMock(
return_value="/tmp/sample/tails/path"
)
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_decorator, async_mock.patch.object(
test_module, "RevocationRegistry", autospec=True
) as mock_rr:
mock_rr.from_definition = async_mock.MagicMock(return_value=more_magic_rr)
mock_attach_decorator.data_base64 = async_mock.MagicMock(
return_value=mock_attach_decorator
)
req_creds = await indy_proof_req_preview2indy_requested_creds(
indy_proof_req_vcx, preview=None, holder=self.holder
)
assert not req_creds["self_attested_attributes"]
assert len(req_creds["requested_attributes"]) == 2
assert len(req_creds["requested_predicates"]) == 1
(px_rec_out, pres_msg) = await self.manager.create_pres(
px_rec_in, req_creds
)
save_ex.assert_called_once()
assert px_rec_out.state == V20PresExRecord.STATE_PRESENTATION_SENT
async def test_create_pres_self_asserted(self):
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(INDY_PROOF_REQ_SELFIE, ident="indy")
],
)
px_rec_in = V20PresExRecord(pres_request=pres_request.serialize())
more_magic_rr = async_mock.MagicMock(
get_or_fetch_local_tails_path=async_mock.CoroutineMock(
return_value="/tmp/sample/tails/path"
)
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_decorator, async_mock.patch.object(
test_module, "RevocationRegistry", autospec=True
) as mock_rr:
mock_rr.from_definition = async_mock.MagicMock(return_value=more_magic_rr)
mock_attach_decorator.data_base64 = async_mock.MagicMock(
return_value=mock_attach_decorator
)
req_creds = await indy_proof_req_preview2indy_requested_creds(
INDY_PROOF_REQ_SELFIE, preview=None, holder=self.holder
)
assert len(req_creds["self_attested_attributes"]) == 3
assert not req_creds["requested_attributes"]
assert not req_creds["requested_predicates"]
(px_rec_out, pres_msg) = await self.manager.create_pres(
px_rec_in, req_creds
)
save_ex.assert_called_once()
assert px_rec_out.state == V20PresExRecord.STATE_PRESENTATION_SENT
async def test_create_pres_no_revocation(self):
Ledger = async_mock.MagicMock(BaseLedger, autospec=True)
self.ledger = Ledger()
self.ledger.get_schema = async_mock.CoroutineMock(
return_value=async_mock.MagicMock()
)
self.ledger.get_credential_definition = async_mock.CoroutineMock(
return_value={"value": {"revocation": None}}
)
self.profile.context.injector.bind_instance(BaseLedger, self.ledger)
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(INDY_PROOF_REQ_NAME, ident="indy")
],
)
px_rec_in = V20PresExRecord(pres_request=pres_request.serialize())
Holder = async_mock.MagicMock(IndyHolder, autospec=True)
self.holder = Holder()
get_creds = async_mock.CoroutineMock(
return_value=(
{
"cred_info": {"referent": "dummy_reft"},
"attrs": {
"player": "Richie Knucklez",
"screenCapture": "aW1hZ2luZSBhIHNjcmVlbiBjYXB0dXJl",
"highScore": "1234560",
},
}, # leave this comma: return a tuple
)
)
self.holder.get_credentials_for_presentation_request_by_referent = get_creds
self.holder.get_credential = async_mock.CoroutineMock(
return_value=json.dumps(
{
"schema_id": S_ID,
"cred_def_id": CD_ID,
"rev_reg_id": None,
"cred_rev_id": None,
}
)
)
self.holder.create_presentation = async_mock.CoroutineMock(return_value="{}")
self.profile.context.injector.bind_instance(IndyHolder, self.holder)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_decorator, async_mock.patch.object(
test_module.LOGGER, "info", async_mock.MagicMock()
) as mock_log_info:
mock_attach_decorator.data_base64 = async_mock.MagicMock(
return_value=mock_attach_decorator
)
req_creds = await indy_proof_req_preview2indy_requested_creds(
INDY_PROOF_REQ_NAME, preview=None, holder=self.holder
)
(px_rec_out, pres_msg) = await self.manager.create_pres(
px_rec_in, req_creds
)
save_ex.assert_called_once()
assert px_rec_out.state == V20PresExRecord.STATE_PRESENTATION_SENT
# exercise superfluous timestamp removal
for pred_reft_spec in req_creds["requested_predicates"].values():
pred_reft_spec["timestamp"] = 1234567890
await self.manager.create_pres(px_rec_in, req_creds)
mock_log_info.assert_called_once()
async def test_create_pres_bad_revoc_state(self):
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(INDY_PROOF_REQ_NAME, ident="indy")
],
)
px_rec_in = V20PresExRecord(pres_request=pres_request.serialize())
Holder = async_mock.MagicMock(IndyHolder, autospec=True)
self.holder = Holder()
get_creds = async_mock.CoroutineMock(
return_value=(
{
"cred_info": {"referent": "dummy_reft"},
"attrs": {
"player": "Richie Knucklez",
"screenCapture": "aW1hZ2luZSBhIHNjcmVlbiBjYXB0dXJl",
"highScore": "1234560",
},
}, # leave this comma: return a tuple
)
)
self.holder.get_credentials_for_presentation_request_by_referent = get_creds
self.holder.get_credential = async_mock.CoroutineMock(
return_value=json.dumps(
{
"schema_id": S_ID,
"cred_def_id": CD_ID,
"rev_reg_id": RR_ID,
"cred_rev_id": 1,
}
)
)
self.holder.create_presentation = async_mock.CoroutineMock(return_value="{}")
self.holder.create_revocation_state = async_mock.CoroutineMock(
side_effect=test_module.IndyHolderError("Problem", {"message": "Nope"})
)
self.profile.context.injector.bind_instance(IndyHolder, self.holder)
more_magic_rr = async_mock.MagicMock(
get_or_fetch_local_tails_path=async_mock.CoroutineMock(
return_value="/tmp/sample/tails/path"
)
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_decorator, async_mock.patch.object(
test_module, "RevocationRegistry", autospec=True
) as mock_rr:
mock_rr.from_definition = async_mock.MagicMock(return_value=more_magic_rr)
mock_attach_decorator.data_base64 = async_mock.MagicMock(
return_value=mock_attach_decorator
)
req_creds = await indy_proof_req_preview2indy_requested_creds(
INDY_PROOF_REQ_NAME, preview=None, holder=self.holder
)
with self.assertRaises(test_module.IndyHolderError):
await self.manager.create_pres(px_rec_in, req_creds)
async def test_create_pres_multi_matching_proposal_creds_names(self):
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(INDY_PROOF_REQ_NAMES, ident="indy")
],
)
px_rec_in = V20PresExRecord(pres_request=pres_request.serialize())
Holder = async_mock.MagicMock(IndyHolder, autospec=True)
self.holder = Holder()
get_creds = async_mock.CoroutineMock(
return_value=(
{
"cred_info": {
"referent": "dummy_reft_0",
"cred_def_id": CD_ID,
"attrs": {
"player": "Richie Knucklez",
"screenCapture": "aW1hZ2luZSBhIHNjcmVlbiBjYXB0dXJl",
"highScore": "1234560",
},
}
},
{
"cred_info": {
"referent": "dummy_reft_1",
"cred_def_id": CD_ID,
"attrs": {
"player": "Richie Knucklez",
"screenCapture": "aW1hZ2luZSBhbm90aGVyIHNjcmVlbiBjYXB0dXJl",
"highScore": "1515880",
},
}
},
)
)
self.holder.get_credentials_for_presentation_request_by_referent = get_creds
self.holder.get_credential = async_mock.CoroutineMock(
return_value=json.dumps(
{
"schema_id": S_ID,
"cred_def_id": CD_ID,
"rev_reg_id": RR_ID,
"cred_rev_id": 1,
}
)
)
self.holder.create_presentation = async_mock.CoroutineMock(return_value="{}")
self.holder.create_revocation_state = async_mock.CoroutineMock(
return_value=json.dumps(
{
"witness": {"omega": "1 ..."},
"rev_reg": {"accum": "21 ..."},
"timestamp": NOW,
}
)
)
self.profile.context.injector.bind_instance(IndyHolder, self.holder)
more_magic_rr = async_mock.MagicMock(
get_or_fetch_local_tails_path=async_mock.CoroutineMock(
return_value="/tmp/sample/tails/path"
)
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
test_module, "AttachDecorator", autospec=True
) as mock_attach_decorator, async_mock.patch.object(
test_module, "RevocationRegistry", autospec=True
) as mock_rr:
mock_rr.from_definition = async_mock.MagicMock(return_value=more_magic_rr)
mock_attach_decorator.data_base64 = async_mock.MagicMock(
return_value=mock_attach_decorator
)
req_creds = await indy_proof_req_preview2indy_requested_creds(
INDY_PROOF_REQ_NAMES, preview=None, holder=self.holder
)
assert not req_creds["self_attested_attributes"]
assert len(req_creds["requested_attributes"]) == 1
assert len(req_creds["requested_predicates"]) == 1
(px_rec_out, pres_msg) = await self.manager.create_pres(
px_rec_in, req_creds
)
save_ex.assert_called_once()
assert px_rec_out.state == V20PresExRecord.STATE_PRESENTATION_SENT
async def test_no_matching_creds_for_proof_req(self):
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(INDY_PROOF_REQ_NAMES, ident="indy")
],
)
px_rec_in = V20PresExRecord(pres_request=pres_request.serialize())
get_creds = async_mock.CoroutineMock(return_value=())
self.holder.get_credentials_for_presentation_request_by_referent = get_creds
with self.assertRaises(ValueError):
await indy_proof_req_preview2indy_requested_creds(
INDY_PROOF_REQ_NAMES, preview=None, holder=self.holder
)
get_creds = async_mock.CoroutineMock(
return_value=(
{
"cred_info": {"referent": "dummy_reft"},
"attrs": {
"player": "Richie Knucklez",
"screenCapture": "aW1hZ2luZSBhIHNjcmVlbiBjYXB0dXJl",
"highScore": "1234560",
},
}, # leave this comma: return a tuple
)
)
self.holder.get_credentials_for_presentation_request_by_referent = get_creds
async def test_receive_pres(self):
connection_record = async_mock.MagicMock(connection_id=CONN_ID)
indy_proof = {
"proof": {
"proofs": [
{
"primary_proof": {
"eq_proof": "...",
"ge_proofs": [
{
"...": "...",
"predicate": {
"attr_name": "highscore",
"p_type": "GE",
"value": 1000000,
},
}
],
}
}
]
},
"requested_proof": {
"revealed_attrs": {
"0_player_uuid": {
"sub_proof_index": 0,
"raw": "Richie Knucklez",
"encoded": "12345678901234567890",
},
"1_screencapture_uuid": {
"sub_proof_index": 0,
"raw": "aW1hZ2luZSBhIHNjcmVlbiBjYXB0dXJl",
"encoded": "98765432109876543210",
},
},
"self_attested_attrs": {},
"unrevealed_attrs": {},
"predicates": {"0_highscore_GE_uuid": {"sub_proof_index": 0}},
},
"identifiers": [
{
"schema_id": S_ID,
"cred_def_id": CD_ID,
"rev_reg_id": None,
"timestamp": None,
}
],
}
pres_proposal = V20PresProposal(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_PROPOSAL][
V20PresFormat.Format.INDY.api
],
)
],
proposals_attach=[
AttachDecorator.data_base64(INDY_PROOF_REQ_NAME, ident="indy")
],
)
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(INDY_PROOF_REQ_NAME, ident="indy")
],
)
pres = V20Pres(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
presentations_attach=[
AttachDecorator.data_base64(indy_proof, ident="indy")
],
)
px_rec_dummy = V20PresExRecord(
pres_proposal=pres_proposal.serialize(),
pres_request=pres_request.serialize(),
)
# cover by_format property
by_format = px_rec_dummy.by_format
assert by_format.get("pres_proposal").get("indy") == INDY_PROOF_REQ_NAME
assert by_format.get("pres_request").get("indy") == INDY_PROOF_REQ_NAME
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
V20PresExRecord, "retrieve_by_tag_filter", autospec=True
) as retrieve_ex, async_mock.patch.object(
self.profile,
"session",
async_mock.MagicMock(return_value=self.profile.session()),
) as session:
retrieve_ex.side_effect = [
StorageNotFoundError("no such record"), # cover out-of-band
px_rec_dummy,
]
px_rec_out = await self.manager.receive_pres(pres, connection_record)
assert retrieve_ex.call_count == 2
save_ex.assert_called_once()
assert px_rec_out.state == (V20PresExRecord.STATE_PRESENTATION_RECEIVED)
async def test_receive_pres_bait_and_switch_attr_name(self):
connection_record = async_mock.MagicMock(connection_id=CONN_ID)
indy_proof_req = deepcopy(INDY_PROOF_REQ_NAME)
indy_proof_req["requested_attributes"]["1_screencapture_uuid"]["restrictions"][
0
][
"attr::screenCapture::value"
] = "c2NyZWVuIGNhcHR1cmUgc2hvd2luZyBzY29yZSBpbiB0aGUgbWlsbGlvbnM="
indy_proof_x = {
"proof": {
"proofs": [
{
"primary_proof": {
"eq_proof": "...",
"ge_proofs": [
{
"...": "...",
"predicate": {
"attr_name": "highscore",
"p_type": "GE",
"value": 1000000,
},
}
],
}
}
]
},
"requested_proof": {
"revealed_attrs": {
"0_player_uuid": {
"sub_proof_index": 0,
"raw": "Richie Knucklez",
"encoded": "12345678901234567890",
},
"1_screencapture_uuid": { # mismatch vs request
"sub_proof_index": 0,
"raw": "bm90IHRoZSBzYW1lIHNjcmVlbiBjYXB0dXJl",
"encoded": "98765432109876543210",
},
},
"self_attested_attrs": {},
"unrevealed_attrs": {},
"predicates": {"0_highscore_GE_uuid": {"sub_proof_index": 0}},
},
"identifiers": [
{
"schema_id": S_ID,
"cred_def_id": CD_ID,
"rev_reg_id": None,
"timestamp": None,
}
],
}
pres_proposal = V20PresProposal(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_PROPOSAL][
V20PresFormat.Format.INDY.api
],
)
],
proposals_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_x = V20Pres(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
presentations_attach=[
AttachDecorator.data_base64(indy_proof_x, ident="indy")
],
)
px_rec_dummy = V20PresExRecord(
pres_proposal=pres_proposal.serialize(),
pres_request=pres_request.serialize(),
pres=pres_x.serialize(),
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
V20PresExRecord, "retrieve_by_tag_filter", autospec=True
) as retrieve_ex:
retrieve_ex.return_value = px_rec_dummy
with self.assertRaises(V20PresManagerError) as context:
await self.manager.receive_pres(pres_x, connection_record)
assert "does not satisfy proof request restrictions" in str(
context.exception
)
indy_proof_req["requested_attributes"]["shenanigans"] = indy_proof_req[
"requested_attributes"
].pop("1_screencapture_uuid")
pres_proposal = V20PresProposal(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_PROPOSAL][
V20PresFormat.Format.INDY.api
],
)
],
proposals_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_x = V20Pres(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
presentations_attach=[
AttachDecorator.data_base64(indy_proof_x, ident="indy")
],
)
px_rec_dummy = V20PresExRecord(
pres_proposal=pres_proposal.serialize(),
pres_request=pres_request.serialize(),
pres=pres_x.serialize(),
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
V20PresExRecord, "retrieve_by_tag_filter", autospec=True
) as retrieve_ex:
retrieve_ex.return_value = px_rec_dummy
with self.assertRaises(V20PresManagerError) as context:
await self.manager.receive_pres(pres_x, connection_record)
assert "Presentation referent" in str(context.exception)
async def test_receive_pres_bait_and_switch_attr_names(self):
connection_record = async_mock.MagicMock(connection_id=CONN_ID)
indy_proof_req = deepcopy(INDY_PROOF_REQ_NAMES)
indy_proof_req["requested_attributes"]["0_player_uuid"]["restrictions"][0][
"attr::screenCapture::value"
] = "c2NyZWVuIGNhcHR1cmUgc2hvd2luZyBzY29yZSBpbiB0aGUgbWlsbGlvbnM="
indy_proof_x = {
"proof": {
"proofs": [
{
"primary_proof": {
"eq_proof": "...",
"ge_proofs": [
{
"...": "...",
"predicate": {
"attr_name": "highscore",
"p_type": "GE",
"value": 1000000,
},
}
],
}
}
]
},
"requested_proof": {
"revealed_attrs": {},
"revealed_attr_groups": {
"0_player_uuid": {
"sub_proof_index": 0,
"values": {
"player": {
"raw": "Richie Knucklez",
"encoded": "12345678901234567890",
},
"0_player_uuid": { # mismatch vs request
"raw": "bm90IHRoZSBzYW1lIHNjcmVlbiBjYXB0dXJl",
"encoded": "98765432109876543210",
},
},
},
},
"self_attested_attrs": {},
"unrevealed_attrs": {},
"predicates": {"0_highscore_GE_uuid": {"sub_proof_index": 0}},
},
"identifiers": [
{
"schema_id": S_ID,
"cred_def_id": CD_ID,
"rev_reg_id": None,
"timestamp": None,
}
],
}
pres_proposal = V20PresProposal(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_PROPOSAL][
V20PresFormat.Format.INDY.api
],
)
],
proposals_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_x = V20Pres(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
presentations_attach=[
AttachDecorator.data_base64(indy_proof_x, ident="indy")
],
)
px_rec_dummy = V20PresExRecord(
pres_proposal=pres_proposal.serialize(),
pres_request=pres_request.serialize(),
pres=pres_x.serialize(),
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
V20PresExRecord, "retrieve_by_tag_filter", autospec=True
) as retrieve_ex:
retrieve_ex.return_value = px_rec_dummy
with self.assertRaises(V20PresManagerError) as context:
await self.manager.receive_pres(pres_x, connection_record)
assert "does not satisfy proof request restrictions " in str(
context.exception
)
indy_proof_req["requested_attributes"]["shenanigans"] = indy_proof_req[
"requested_attributes"
].pop("0_player_uuid")
pres_proposal = V20PresProposal(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_PROPOSAL][
V20PresFormat.Format.INDY.api
],
)
],
proposals_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_x = V20Pres(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
presentations_attach=[
AttachDecorator.data_base64(indy_proof_x, ident="indy")
],
)
px_rec_dummy = V20PresExRecord(
pres_proposal=pres_proposal.serialize(),
pres_request=pres_request.serialize(),
pres=pres_x.serialize(),
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
V20PresExRecord, "retrieve_by_tag_filter", autospec=True
) as retrieve_ex:
retrieve_ex.return_value = px_rec_dummy
with self.assertRaises(V20PresManagerError) as context:
await self.manager.receive_pres(pres_x, connection_record)
assert "Presentation referent" in str(context.exception)
async def test_receive_pres_bait_and_switch_pred(self):
connection_record = async_mock.MagicMock(connection_id=CONN_ID)
indy_proof_req = deepcopy(INDY_PROOF_REQ_NAME)
indy_proof_req["requested_predicates"] = {}
indy_proof_x = {
"proof": {
"proofs": [
{
"primary_proof": {
"eq_proof": "...",
"ge_proofs": [
{
"...": "...",
"predicate": {
"attr_name": "highscore",
"p_type": "GE",
"value": 1000000,
},
}
],
}
}
]
},
"requested_proof": {
"revealed_attrs": {
"0_player_uuid": {
"sub_proof_index": 0,
"raw": "Richie Knucklez",
"encoded": "12345678901234567890",
},
"1_screencapture_uuid": { # mismatch vs request
"sub_proof_index": 0,
"raw": "bm90IHRoZSBzYW1lIHNjcmVlbiBjYXB0dXJl",
"encoded": "98765432109876543210",
},
},
"self_attested_attrs": {},
"unrevealed_attrs": {},
"predicates": {"0_highscore_GE_uuid": {"sub_proof_index": 0}},
},
"identifiers": [
{
"schema_id": S_ID,
"cred_def_id": CD_ID,
"rev_reg_id": None,
"timestamp": None,
}
],
}
pres_proposal = V20PresProposal(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_PROPOSAL][
V20PresFormat.Format.INDY.api
],
)
],
proposals_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_x = V20Pres(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
presentations_attach=[
AttachDecorator.data_base64(indy_proof_x, ident="indy")
],
)
px_rec_dummy = V20PresExRecord(
pres_proposal=pres_proposal.serialize(),
pres_request=pres_request.serialize(),
pres=pres_x.serialize(),
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
V20PresExRecord, "retrieve_by_tag_filter", autospec=True
) as retrieve_ex:
retrieve_ex.return_value = px_rec_dummy
with self.assertRaises(V20PresManagerError) as context:
await self.manager.receive_pres(pres_x, connection_record)
assert "not in proposal request" in str(context.exception)
indy_proof_req["requested_predicates"]["0_highscore_GE_uuid"] = {
"name": "shenanigans",
"p_type": ">=",
"p_value": 1000000,
"restrictions": [{"cred_def_id": CD_ID}],
"non_revoked": {"from": NOW, "to": NOW},
}
pres_proposal = V20PresProposal(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_PROPOSAL][
V20PresFormat.Format.INDY.api
],
)
],
proposals_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_x = V20Pres(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
presentations_attach=[
AttachDecorator.data_base64(indy_proof_x, ident="indy")
],
)
px_rec_dummy = V20PresExRecord(
pres_proposal=pres_proposal.serialize(),
pres_request=pres_request.serialize(),
pres=pres_x.serialize(),
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
V20PresExRecord, "retrieve_by_tag_filter", autospec=True
) as retrieve_ex:
retrieve_ex.return_value = px_rec_dummy
with self.assertRaises(V20PresManagerError) as context:
await self.manager.receive_pres(pres_x, connection_record)
assert "shenanigans not in presentation" in str(context.exception)
indy_proof_req["requested_predicates"]["0_highscore_GE_uuid"] = {
"name": "highScore",
"p_type": ">=",
"p_value": 8000000, # propose >= 8 million, prove >= 1 million
"restrictions": [{"cred_def_id": CD_ID}],
"non_revoked": {"from": NOW, "to": NOW},
}
pres_proposal = V20PresProposal(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_PROPOSAL][
V20PresFormat.Format.INDY.api
],
)
],
proposals_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_x = V20Pres(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
presentations_attach=[
AttachDecorator.data_base64(indy_proof_x, ident="indy")
],
)
px_rec_dummy = V20PresExRecord(
pres_proposal=pres_proposal.serialize(),
pres_request=pres_request.serialize(),
pres=pres_x.serialize(),
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
V20PresExRecord, "retrieve_by_tag_filter", autospec=True
) as retrieve_ex:
retrieve_ex.return_value = px_rec_dummy
with self.assertRaises(V20PresManagerError) as context:
await self.manager.receive_pres(pres_x, connection_record)
assert "highScore mismatches proposal request" in str(context.exception)
indy_proof_req["requested_predicates"]["0_highscore_GE_uuid"] = {
"name": "highScore",
"p_type": ">=",
"p_value": 1000000,
"restrictions": [{"issuer_did": "FFFFFFFFFFFFFFFFFFFFFF"}], # fake issuer
"non_revoked": {"from": NOW, "to": NOW},
}
pres_proposal = V20PresProposal(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_PROPOSAL][
V20PresFormat.Format.INDY.api
],
)
],
proposals_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(indy_proof_req, ident="indy")
],
)
pres_x = V20Pres(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
presentations_attach=[
AttachDecorator.data_base64(indy_proof_x, ident="indy")
],
)
px_rec_dummy = V20PresExRecord(
pres_proposal=pres_proposal.serialize(),
pres_request=pres_request.serialize(),
pres=pres_x.serialize(),
)
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
V20PresExRecord, "retrieve_by_tag_filter", autospec=True
) as retrieve_ex:
retrieve_ex.return_value = px_rec_dummy
with self.assertRaises(V20PresManagerError) as context:
await self.manager.receive_pres(pres_x, connection_record)
assert "does not satisfy proof request restrictions " in str(
context.exception
)
async def test_verify_pres(self):
indy_proof = {
"proof": {"proofs": []},
"requested_proof": {
"revealed_attrs": {
"0_player_uuid": {
"sub_proof_index": 0,
"raw": "Richie Knucklez",
"encoded": "12345678901234567890",
},
"1_screencapture_uuid": {
"sub_proof_index": 0,
"raw": "cG90YXRv",
"encoded": "98765432109876543210",
},
},
"self_attested_attrs": {},
"unrevealed_attrs": {},
"predicates": {"0_highscore_GE_uuid": {"sub_proof_index": 0}},
},
"identifiers": [
{
"schema_id": S_ID,
"cred_def_id": CD_ID,
"rev_reg_id": None,
"timestamp": None,
}
],
}
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
will_confirm=True,
request_presentations_attach=[
AttachDecorator.data_base64(INDY_PROOF_REQ_NAME, ident="indy")
],
)
pres = V20Pres(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
presentations_attach=[
AttachDecorator.data_base64(indy_proof, ident="indy")
],
)
px_rec_in = V20PresExRecord(
pres_request=pres_request.serialize(),
pres=pres.serialize(),
)
with async_mock.patch.object(V20PresExRecord, "save", autospec=True) as save_ex:
px_rec_out = await self.manager.verify_pres(px_rec_in)
save_ex.assert_called_once()
assert px_rec_out.state == (V20PresExRecord.STATE_DONE)
async def test_verify_pres_with_revocation(self):
indy_proof = {
"proof": {"proofs": []},
"requested_proof": {
"revealed_attrs": {
"0_player_uuid": {
"sub_proof_index": 0,
"raw": "Richie Knucklez",
"encoded": "12345678901234567890",
},
"1_screencapture_uuid": {
"sub_proof_index": 0,
"raw": "cG90YXRv",
"encoded": "98765432109876543210",
},
},
"self_attested_attrs": {},
"unrevealed_attrs": {},
"predicates": {"0_highscore_GE_uuid": {"sub_proof_index": 0}},
},
"identifiers": [
{
"schema_id": S_ID,
"cred_def_id": CD_ID,
"rev_reg_id": RR_ID,
"timestamp": NOW,
}
],
}
pres_request = V20PresRequest(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20_REQUEST][
V20PresFormat.Format.INDY.api
],
)
],
request_presentations_attach=[
AttachDecorator.data_base64(INDY_PROOF_REQ_NAME, ident="indy")
],
)
pres = V20Pres(
formats=[
V20PresFormat(
attach_id="indy",
format_=ATTACHMENT_FORMAT[PRES_20][V20PresFormat.Format.INDY.api],
)
],
presentations_attach=[
AttachDecorator.data_base64(indy_proof, ident="indy")
],
)
px_rec_in = V20PresExRecord(
pres_request=pres_request.serialize(),
pres=pres.serialize(),
)
with async_mock.patch.object(V20PresExRecord, "save", autospec=True) as save_ex:
px_rec_out = await self.manager.verify_pres(px_rec_in)
save_ex.assert_called_once()
assert px_rec_out.state == (V20PresExRecord.STATE_DONE)
async def test_send_pres_ack(self):
px_rec = V20PresExRecord()
responder = MockResponder()
self.profile.context.injector.bind_instance(BaseResponder, responder)
await self.manager.send_pres_ack(px_rec)
messages = responder.messages
assert len(messages) == 1
async def test_send_pres_ack_no_responder(self):
px_rec = V20PresExRecord()
self.profile.context.injector.clear_binding(BaseResponder)
await self.manager.send_pres_ack(px_rec)
async def test_receive_pres_ack(self):
conn_record = async_mock.MagicMock(connection_id=CONN_ID)
px_rec_dummy = V20PresExRecord()
message = async_mock.MagicMock()
with async_mock.patch.object(
V20PresExRecord, "save", autospec=True
) as save_ex, async_mock.patch.object(
V20PresExRecord, "retrieve_by_tag_filter", autospec=True
) as retrieve_ex:
retrieve_ex.return_value = px_rec_dummy
px_rec_out = await self.manager.receive_pres_ack(message, conn_record)
save_ex.assert_called_once()
assert px_rec_out.state == V20PresExRecord.STATE_DONE
| 37.860366 | 88 | 0.517031 | [
"Apache-2.0"
] | wadeking98/aries-cloudagent-python | aries_cloudagent/protocols/present_proof/v2_0/tests/test_manager.py | 62,091 | Python |
import setuptools
with open('README.md', 'r') as file:
long_description = file.read()
setuptools.setup(
name='anvil-parser',
version='0.5.2',
author='mat',
description='A Minecraft anvil file format parser',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/matcool/anvil-parser',
packages=setuptools.find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
install_requires=[
'nbt',
'frozendict',
]
)
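# Editable install for local development (generic setuptools workflow, not a
# command documented by this repository):
#   pip install -e .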
| 25.92 | 55 | 0.640432 | [
"MIT"
] | MrMallIronmaker/anvil-parser | setup.py | 648 | Python |
# Script that cuts the greet string into chunks and prints it out
greet = "Hello World!"
print(greet)
print("Start: ", greet[0:3])
print("Middle: ", greet[3:6])
print("End: ", greet[-3:])
a = greet.find(",")
print("Portion before comma", greet[:a])
| 19.461538 | 65 | 0.652174 | [
"Unlicense"
] | Memeklos/Programming-Code-Snippets | Python/CTF/greet.py | 253 | Python |
# -*- coding: utf-8 -*-
""" NETWORK
This module defines the BlendHunter class which can be used to retrain the
network or use predefined weights to make predictions on unseen data.
:Author: Samuel Farrens <samuel.farrens@cea.fr>
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from time import time
from cv2 import imread
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, Input
from keras.applications import VGG16
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
from keras.callbacks import ReduceLROnPlateau
class BlendHunter(object):
""" BlendHunter
Class for identifying blended galaxy images in postage stamps.
Parameters
----------
image_shape : tuple, optional
Expected shape of input images
classes : tuple, optional
List of classes, default is ('blended', 'not_blended')
weights_path : str, optional
Path to weights, default is './weights'
top_model_file : str, optional
File name for top model weights, default is 'top_model_weights'
final_model_file : str, optional
File name of the final model weights, default is
        'final_model_weights'
    verbose : int, optional
        Verbosity level for Keras training output and callbacks, default is 0
    """
def __init__(self, image_shape=None, classes=('blended', 'not_blended'),
weights_path='./weights', top_model_file='top_model_weights',
final_model_file='final_model_weights', verbose=0):
self._image_shape = image_shape
self._classes = classes
self._weights_path = weights_path
self._top_model_file = self._format(weights_path, top_model_file)
self._final_model_file = self._format(weights_path, final_model_file)
self._verbose = verbose
self.history = None
@staticmethod
def _format(path, name):
""" Format
Add path to name.
Parameters
----------
path : str
Base path
name : str
Path extension
Returns
-------
str
Formated path
"""
return '{}/{}'.format(path, name)
def getkwarg(self, key, default=None):
""" Get keyword agrument
Get value from keyword agruments if it exists otherwise return default.
Parameters
----------
key : str
Dictionary key
default : optional
Default value
"""
return self._kwargs[key] if key in self._kwargs else default
@staticmethod
def _get_image_shape(file):
""" Get Image Shape
Get the input image shape from an example image.
Parameters
----------
file : str
File name
Returns
-------
tuple
Image shape
"""
return imread(file).shape
def _get_target_shape(self, image_path=None):
""" Get Target Shape
Get the network target shape from the image shape.
Parameters
----------
image_path : str, optional
Path to image file
"""
if isinstance(self._image_shape, type(None)) and image_path:
file = self._format(image_path, os.listdir(image_path)[0])
self._image_shape = self._get_image_shape(file)
self._target_size = self._image_shape[:2]
def _load_generator(self, input_dir, batch_size=None,
class_mode=None, augmentation=False):
""" Load Generator
Load files from an input directory into a Keras generator.
Parameters
----------
input_dir : str
Input directory
batch_size : int, optional
Batch size
class_mode : str, optional
Generator class mode
        augmentation : bool, optional
            Option to apply image data augmentation, default is False
Returns
-------
keras_preprocessing.image.DirectoryIterator
Keras generator
"""
if augmentation:
datagen = ImageDataGenerator(rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
else:
datagen = ImageDataGenerator(rescale=1. / 255)
generator = (datagen.flow_from_directory(input_dir,
target_size=self._target_size,
batch_size=batch_size, class_mode=class_mode,
shuffle=False))
generator.steps = generator.n // generator.batch_size
return generator
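    # Directory layout expected by flow_from_directory above (file names are
    # illustrative):
    #   input_dir/
    #       blended/       blend_000.png, blend_001.png, ...
    #       not_blended/   single_000.png, single_001.png, ...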
def _get_feature(self, input_dir):
""" Get Feature
Get network feature and labels from VGG16 model.
Parameters
----------
input_dir : str
Input directory
Returns
-------
tuple
VGG16 bottleneck feature, class labels
"""
generator = self._load_generator(input_dir,
batch_size=self._batch_size_top)
labels = generator.classes[:generator.steps * self._batch_size_top]
return (self._vgg16_model.predict_generator(generator,
generator.steps), labels)
@staticmethod
def _save_data(data, data_type, file_path):
""" Save Data
Save data to file.
Parameters
----------
data : np.ndarray
Output data
data_type : str
Type of feature to be saved
file_path : str
File path
"""
file_name = '{}_{}.npy'.format(file_path, data_type)
np.save(file_name, data)
@staticmethod
def _load_data(data_type, file_path):
""" Load Data
Load data from file.
Parameters
----------
data_type : str
Type of feature to be loaded
file_path : str
File path
"""
file_name = '{}_{}.npy'.format(file_path, data_type)
if os.path.isfile(file_name):
return np.load(file_name)
else:
raise IOError('{} not found'.format(file_name))
@staticmethod
def _build_vgg16_model(input_shape=None):
""" Build VGG16 Model
Build VGG16 CNN model using imagenet weights.
Parameters
----------
input_shape : str, optional
Input data shape
Returns
-------
VGG16 model
"""
return VGG16(include_top=False, weights='imagenet',
input_shape=input_shape)
def _get_features(self):
""" Get Features
Get the network (bottleneck) features from the VGG16 model.
"""
self._vgg16_model = self._build_vgg16_model()
for key, value in self._features.items():
bot_feat, labels = self._get_feature(value['dir'])
if self._save_bottleneck:
self._save_data(bot_feat, key, self._bottleneck_file)
if self._save_labels:
self._save_data(labels, key, self._labels_file)
value['bottleneck'] = bot_feat
value['labels'] = labels
def _load_features(self):
""" Load Bottleneck Features
Load VGG16 bottleneck features.
"""
for feature_name in ('bottleneck', 'labels'):
if feature_name == 'bottleneck':
out_path = self._bottleneck_file
else:
out_path = self._labels_file
for key, value in self._features.items():
if feature_name not in value:
value[feature_name] = self._load_data(key, out_path)
@staticmethod
def _build_top_model(input_shape, dense_output=(256, 1024), dropout=0.1):
""" Build Top Model
Build the fully connected layers of the network.
Parameters
----------
input_shape : tuple
Input data shape
dense_output : tuple, optional
Size of dense output layers, default is (256, 1024)
dropout : float, optional
Dropout rate, default is 0.1
Returns
-------
keras.model
Fully connected top model
"""
model = Sequential()
model.add(Flatten(input_shape=input_shape))
model.add(Dense(dense_output[0]))
model.add(Dropout(dropout))
model.add(Dense(dense_output[1], activation='relu'))
model.add(Dense(1, activation='sigmoid'))
return model
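    # The head built above is: Flatten -> Dense(256, linear) -> Dropout ->
    # Dense(1024, relu) -> Dense(1, sigmoid), sized by the dense_output and
    # dropout arguments; it is trained on bottleneck features and later stacked
    # on top of the full VGG16 network.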
def _train_top_model(self):
""" Train Top Model
Train fully connected top model of the network.
"""
self._load_features()
model = (self._build_top_model(
input_shape=self._features['train']['bottleneck'].shape[1:]))
model.compile(optimizer=self.getkwarg('top_opt', 'adam'),
loss=self.getkwarg('top_loss', 'binary_crossentropy'),
metrics=self.getkwarg('top_metrics', ['accuracy']))
top_model_file = '{}.h5'.format(self._top_model_file)
callbacks = []
callbacks.append(ModelCheckpoint(top_model_file,
monitor='val_loss', verbose=self._verbose,
save_best_only=True, save_weights_only=True,
mode='auto', period=1))
if self.getkwarg('top_early_stop', True):
min_delta = self.getkwarg('top_min_delta', 0.001)
patience = self.getkwarg('top_patience', 10)
callbacks.append(EarlyStopping(monitor='val_loss',
min_delta=min_delta,
patience=patience,
verbose=self._verbose))
callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=5, min_delta=0.001,
cooldown=2, verbose=self._verbose))
self.history = (model.fit(self._features['train']['bottleneck'],
self._features['train']['labels'],
epochs=self._epochs_top,
batch_size=self._batch_size_top,
callbacks=callbacks,
validation_data=(self._features['valid']['bottleneck'],
self._features['valid']['labels']),
verbose=self._verbose))
model.save_weights(top_model_file)
def plot_history(self):
""" Plot History
Plot the training history metrics.
"""
sns.set(style="darkgrid")
if not isinstance(self.history, type(None)):
plt.figure(figsize=(16, 8))
plt.subplot(121)
plt.plot(self.history.history['acc'])
plt.plot(self.history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.subplot(122)
plt.plot(self.history.history['loss'])
plt.plot(self.history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.show()
else:
print('No history to display. Run training first.')
def _freeze_layers(self, model, depth):
""" Freeze Network Layers
Parameters
----------
model :
Keras model
depth : int
Depth of layers to be frozen
"""
for layer in model.layers[:depth]:
layer.trainable = False
def _build_final_model(self, load_top_weights=False,
load_final_weights=False):
""" Build Final Model
Build the final BlendHunter model.
Parameters
----------
load_top_weights : bool
Option to load the top model weights
load_final_weights : bool
Option to load the final model weights
Returns
-------
Final model
"""
vgg16_model = self._build_vgg16_model(self._image_shape)
top_model = self._build_top_model(vgg16_model.output_shape[1:],
dropout=0.4)
if load_top_weights:
top_model.load_weights('{}.h5'.format(self._top_model_file))
model = Model(inputs=vgg16_model.input,
outputs=top_model(vgg16_model.output))
if load_final_weights:
model.load_weights('{}.h5'.format(self._final_model_file))
return model
def _fine_tune(self):
""" Fine Tune
Fine tune the final model training.
"""
model = self._build_final_model(load_top_weights=True)
self._freeze_layers(model, 18)
model.compile(loss='binary_crossentropy',
optimizer=Adam(lr=0.0001),
metrics=['binary_accuracy'])
train_gen = self._load_generator(self._features['train']['dir'],
batch_size=self._batch_size_fine,
class_mode='binary',
augmentation=True)
valid_gen = self._load_generator(self._features['valid']['dir'],
batch_size=self._batch_size_fine,
class_mode='binary')
callbacks = []
callbacks.append(ModelCheckpoint('{}.h5'.format(self._fine_tune_file),
monitor='val_loss', verbose=self._verbose,
save_best_only=True, save_weights_only=True,
mode='auto', period=1))
callbacks.append(EarlyStopping(monitor='val_loss', min_delta=0.001,
patience=10, verbose=self._verbose))
callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=5, min_delta=0.001,
cooldown=2, verbose=self._verbose))
model.fit_generator(train_gen, steps_per_epoch=train_gen.steps,
epochs=self._epochs_fine,
callbacks=callbacks,
validation_data=valid_gen,
validation_steps=valid_gen.steps,
verbose=self._verbose)
self._freeze_layers(model, 19)
model.layers[17].trainable = True
model.compile(loss='binary_crossentropy',
optimizer=SGD(lr=10e-5),
metrics=['binary_accuracy'])
model.fit_generator(train_gen, steps_per_epoch=train_gen.steps,
epochs=self._epochs_fine,
callbacks=callbacks,
validation_data=valid_gen,
validation_steps=valid_gen.steps,
verbose=self._verbose)
model.save_weights('{}.h5'.format(self._final_model_file))
def train(self, input_path, get_features=True, train_top=True,
fine_tune=True, train_dir_name='train',
valid_dir_name='validation', epochs_top=500, epochs_fine=50,
batch_size_top=250, batch_size_fine=16, save_bottleneck=True,
bottleneck_file='bottleneck_features',
save_labels=True, labels_file='labels',
fine_tune_file='fine_tune_checkpoint',
top_model_file='top_model_weights', **kwargs):
""" Train
Train the BlendHunter network.
Parameters
----------
input_path : str
Path to input data
get_features : bool, optional
Option to get bottleneck features, default is True
train_top : bool, optional
Option to train top model, default is True
fine_tune : bool, optional
Option to run fine tuning component of training, default is True
train_dir_name : str, optional
Training data directory name, default is 'train'
valid_dir_name : str, optional
Validation data directory name, default is 'validation'
epochs_top : int, optional
Number of training epochs for top model, default is 500
epochs_fine : int, optional
Number of training epochs for fine tuning, default is 50
batch_size_top : int, optional
            Batch size for top model, default is 250
batch_size_fine : int, optional
Batch size for fine tuning, default is 16
        save_bottleneck : bool, optional
            Option to save bottleneck features, default is True
        bottleneck_file : str, optional
            File name for bottleneck features, default is
            'bottleneck_features'
        save_labels : bool, optional
            Option to save labels, default is True
        labels_file : str, optional
            File name for labels, default is 'labels'
        fine_tune_file : str, optional
            Training checkpoint for the fine tuning step, default is
            'fine_tune_checkpoint'
        top_model_file : str, optional
            File name for top model weights, default is
            'top_model_weights'
        """
start = time()
self._epochs_top = epochs_top
self._epochs_fine = epochs_fine
self._batch_size_top = batch_size_top
self._batch_size_fine = batch_size_fine
self._save_bottleneck = save_bottleneck
self._save_labels = save_labels
self._bottleneck_file = self._format(self._weights_path,
bottleneck_file)
self._labels_file = self._format(self._weights_path, labels_file)
self._fine_tune_file = self._format(self._weights_path, fine_tune_file)
self._features = {'train': {}, 'valid': {}}
self._features['train']['dir'] = self._format(input_path,
train_dir_name)
self._features['valid']['dir'] = self._format(input_path,
valid_dir_name)
self._kwargs = kwargs
self._get_target_shape(self._format(self._features['train']['dir'],
self._classes[0]))
if get_features:
self._get_features()
if train_top:
self._train_top_model()
if fine_tune:
self._fine_tune()
end = time()
print('Duration {:0.2f}s'.format(end - start))
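    # Hedged usage sketch (illustrative only; the paths and the constructor
    # arguments below are assumptions, not defined in this file):
    #
    #     bh = BlendHunter(weights_path='./weights')   # hypothetical setup
    #     bh.train('./data', epochs_top=100, epochs_fine=10)
    #     # expects './data/train/<class>/' and './data/validation/<class>/' folders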
def predict(self, input_path=None, input_path_keras=None, input_data=None,
weights_type='fine'):
""" Predict
Predict classes for test data
Parameters
----------
input_path : str
Path to input data
input_path_keras : str
Path to input data in Keras format, i.e. path to directory one
level above where the data is stored
input_data : np.ndarray
Array of input images
weights_type : str, optional {'fine', 'top'}
            Type of weights to use for prediction, default is 'fine'
Returns
-------
dict
Dictionary of file names and corresponding classes
"""
if input_path:
test_path = '/'.join(input_path.split('/')[:-1])
elif input_path_keras:
test_path = input_path_keras
else:
test_path = None
if weights_type not in ('fine', 'top'):
raise ValueError('Invalid value for weights_type. Options are '
'"fine" or "top"')
if test_path:
self._get_target_shape(self._format(test_path,
os.listdir(test_path)[0]))
if weights_type == 'fine':
model = self._build_final_model(load_final_weights=True)
elif weights_type == 'top':
model = self._build_final_model(load_top_weights=True)
test_gen = self._load_generator(test_path,
class_mode='categorical',
batch_size=1)
self.filenames = test_gen.filenames
test_gen.reset()
res = model.predict_generator(test_gen,
verbose=self._verbose,
steps=test_gen.steps).flatten()
elif not isinstance(input_data, type(None)):
self._image_shape = input_data.shape[1:]
self._get_target_shape()
model = self._build_final_model(load_final_weights=True)
res = model.predict(input_data, verbose=self._verbose).flatten()
else:
raise RuntimeError('No input data provided.')
labels = {0: self._classes[0], 1: self._classes[1]}
preds = [labels[k] for k in np.around(res)]
return preds
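    # Hedged usage sketch (illustrative; `bh` and the paths are assumptions):
    #
    #     preds = bh.predict(input_path='./data/test/images', weights_type='fine')
    #     preds = bh.predict(input_data=image_array)   # for an in-memory image array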
| 31.537313 | 79 | 0.555419 | ["MIT"] | CosmoStat/BlendHunter | blendhunter/network.py | 21,130 | Python |
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit from.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
        if len(self.data) > 0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
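    # Hedged usage sketch (assumes `ts` is an existing TimeSeries instance with
    # an 'intensity' column, as in the class docstring example):
    #
    #     q = ts.quantity('intensity')   # astropy Quantity carrying the column's unit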
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
        Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
        # Add the unit to the units dictionary if not already there.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
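    # Hedged usage sketch (column name, values and unit are illustrative;
    # assumes numpy is imported as np):
    #
    #     flux = u.Quantity(np.zeros(len(ts.data)), u.W / u.m ** 2)
    #     ts2 = ts.add_column('flux', flux)   # the original `ts` is left untouched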
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
            recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
            recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
        # Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
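    # Hedged usage sketch (dates and indices are illustrative):
    #
    #     ts_sub = ts.truncate(TimeRange('2021-01-01', '2021-01-02'))
    #     ts_sub = ts.truncate('2021-01-01 00:00', '2021-01-01 12:00')
    #     ts_sub = ts.truncate(0, 100, 2)   # every second row of the first 100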
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
        # Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
        same_source : `bool`, optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
        # Pop so this option is not forwarded to pd.concat below
        same_source = kwargs.pop('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
        # Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
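    # Hedged usage sketch (`ts_a` and `ts_b` are illustrative TimeSeries objects):
    #
    #     combined = ts_a.concatenate(ts_b)   # duplicate times keep ts_a's values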
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
        To be added into all the peek methods in all source sub-classes.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
        # Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
            units.update({column: self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
        Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
        # Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
        Return an Astropy Table of the given TimeSeries object.
Returns
-------
        newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
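    # Hedged usage sketch:
    #
    #     tbl = ts.to_table()   # astropy Table with a leading 'date' column and units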
def to_dataframe(self, **kwargs):
"""
        Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
        Return a numpy array of the given TimeSeries object.
Parameters
----------
        columns : `list`, optional, default: None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
            # Select the requested columns before converting to an array
            return self.data[columns].values
else:
return self.data.values
def __eq__(self, other):
"""
        Check whether two TimeSeries objects are the same, i.e. that they have
        matching type, data, metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
        Check whether two TimeSeries objects are not the same, i.e. that they
        differ in type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
| 33.719355 | 107 | 0.604802 | ["BSD-2-Clause"] | yashrsharma44/sunpy | sunpy/timeseries/timeseriesbase.py | 20,906 | Python |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras' base preprocessing layer."""
import os
import keras
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import base_preprocessing_layer
import numpy as np
import tensorflow.compat.v2 as tf
# Define a test-only implementation of BasePreprocessingLayer to validate
# its correctness directly.
class AddingPreprocessingLayer(base_preprocessing_layer.PreprocessingLayer):
def build(self, input_shape):
super(AddingPreprocessingLayer, self).build(input_shape)
self.sum = tf.Variable(0., dtype=tf.float32)
def update_state(self, data):
self.sum.assign_add(tf.reduce_sum(tf.cast(data, tf.float32)))
def reset_state(self): # pylint: disable=method-hidden
self.sum.assign(0.)
def set_total(self, sum_value):
"""This is an example of how a subclass would implement a direct setter.
Args:
sum_value: The total to set.
"""
self.sum.assign(sum_value)
def call(self, inputs):
return inputs + self.sum
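# Hedged illustration of the adapt/call pattern exercised by the tests below
# (AddingPreprocessingLayer is the test helper defined above, not public API):
#
#     layer = AddingPreprocessingLayer()
#     layer.adapt(np.array([1, 2, 3, 4, 5]))   # internal sum becomes 15
#     layer(np.array([[1.], [2.], [3.]]))      # -> [[16.], [17.], [18.]]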
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class PreprocessingLayerTest(keras_parameterized.TestCase):
def test_adapt_bad_input_fails(self):
"""Test that non-Dataset/Numpy inputs cause a reasonable error."""
input_dataset = {"foo": 0}
layer = AddingPreprocessingLayer()
if tf.executing_eagerly():
with self.assertRaisesRegex(ValueError, "Failed to find data adapter"):
layer.adapt(input_dataset)
else:
with self.assertRaisesRegex(ValueError, "requires a"):
layer.adapt(input_dataset)
def test_adapt_infinite_dataset_fails(self):
"""Test that preproc layers fail if an infinite dataset is passed."""
input_dataset = tf.data.Dataset.from_tensor_slices(
np.array([[1], [2], [3], [4], [5], [0]])).repeat()
layer = AddingPreprocessingLayer()
if tf.executing_eagerly():
with self.assertRaisesRegex(ValueError, "infinite dataset"):
layer.adapt(input_dataset)
else:
with self.assertRaisesRegex(ValueError,
".*infinite number of elements.*"):
layer.adapt(input_dataset)
def test_setter_update(self):
"""Test the prototyped setter method."""
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
layer.set_total(15)
self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))
def test_pre_build_adapt_update_numpy(self):
"""Test that preproc layers can adapt() before build() is called."""
input_dataset = np.array([1, 2, 3, 4, 5])
layer = AddingPreprocessingLayer()
layer.adapt(input_dataset)
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))
def test_post_build_adapt_update_numpy(self):
"""Test that preproc layers can adapt() after build() is called."""
input_dataset = np.array([1, 2, 3, 4, 5])
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))
def test_pre_build_adapt_update_dataset(self):
"""Test that preproc layers can adapt() before build() is called."""
input_dataset = tf.data.Dataset.from_tensor_slices(
np.array([[1], [2], [3], [4], [5], [0]]))
layer = AddingPreprocessingLayer()
layer.adapt(input_dataset)
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))
def test_post_build_adapt_update_dataset(self):
"""Test that preproc layers can adapt() after build() is called."""
input_dataset = tf.data.Dataset.from_tensor_slices(
np.array([[1], [2], [3], [4], [5], [0]]))
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))
def test_weight_based_state_transfer(self):
"""Test that preproc layers can transfer state via get/set weights.."""
def get_model():
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
return (model, layer)
input_dataset = np.array([1, 2, 3, 4, 5])
model, layer = get_model()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))
# Create a new model and verify it has no state carryover.
weights = model.get_weights()
model_2, _ = get_model()
self.assertAllEqual([[1], [2], [3]], model_2.predict([1., 2., 3.]))
# Transfer state from model to model_2 via get/set weights.
model_2.set_weights(weights)
self.assertAllEqual([[16], [17], [18]], model_2.predict([1., 2., 3.]))
def test_loading_without_providing_class_fails(self):
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
if not tf.executing_eagerly():
self.evaluate(tf.compat.v1.variables_initializer(model.variables))
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
with self.assertRaisesRegex(RuntimeError, "Unable to restore a layer of"):
_ = keras.models.load_model(output_path)
def test_adapt_sets_input_shape_rank(self):
"""Check that `.adapt()` sets the `input_shape`'s rank."""
# Shape: (3,1,2)
adapt_dataset = np.array([[[1., 2.]], [[3., 4.]], [[5., 6.]]],
dtype=np.float32)
layer = AddingPreprocessingLayer()
layer.adapt(adapt_dataset)
input_dataset = np.array([[[1., 2.], [3., 4.]], [[3., 4.], [5., 6.]]],
dtype=np.float32)
layer(input_dataset)
model = keras.Sequential([layer])
self.assertTrue(model.built)
self.assertEqual(model.input_shape, (None, None, None))
def test_adapt_doesnt_overwrite_input_shape(self):
"""Check that `.adapt()` doesn't change the `input_shape`."""
# Shape: (3, 1, 2)
adapt_dataset = np.array([[[1., 2.]], [[3., 4.]], [[5., 6.]]],
dtype=np.float32)
layer = AddingPreprocessingLayer(input_shape=[1, 2])
layer.adapt(adapt_dataset)
model = keras.Sequential([layer])
self.assertTrue(model.built)
self.assertEqual(model.input_shape, (None, 1, 2))
class PreprocessingLayerV1Test(keras_parameterized.TestCase):
def test_adapt_fails(self):
"""Test that calling adapt leads to a runtime error."""
input_dataset = {"foo": 0}
with tf.Graph().as_default():
layer = AddingPreprocessingLayer()
with self.assertRaisesRegex(RuntimeError,
"`adapt` is only supported in tensorflow v2"):
layer.adapt(input_dataset)
if __name__ == "__main__":
tf.test.main()
| 35.117647 | 80 | 0.671094 | ["Apache-2.0"] | 01-vyom/keras | keras/engine/base_preprocessing_layer_test.py | 8,358 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""tracking_url
Revision ID: ca69c70ec99b
Revises: a65458420354
Create Date: 2017-07-26 20:09:52.606416
"""
# revision identifiers, used by Alembic.
revision = "ca69c70ec99b"
down_revision = "a65458420354"
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
op.add_column("query", sa.Column("tracking_url", sa.Text(), nullable=True))
def downgrade():
op.drop_column("query", "tracking_url")
| 31 | 79 | 0.762903 | ["Apache-2.0"] | 1AB9502/incubator-superset | superset/migrations/versions/ca69c70ec99b_tracking_url.py | 1,240 | Python |
"""An abstract class for entities."""
from __future__ import annotations
from abc import ABC
import asyncio
from collections.abc import Awaitable, Iterable, Mapping, MutableMapping
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum, auto
import functools as ft
import logging
import math
import sys
from timeit import default_timer as timer
from typing import Any, Final, Literal, TypedDict, final
import voluptuous as vol
from homeassistant.backports.enum import StrEnum
from homeassistant.config import DATA_CUSTOMIZE
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ATTRIBUTION,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_PICTURE,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_DEFAULT_NAME,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import (
CALLBACK_TYPE,
Context,
Event,
HomeAssistant,
callback,
split_entity_id,
)
from homeassistant.exceptions import HomeAssistantError, NoEntitySpecifiedError
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util, ensure_unique_string, slugify
from . import entity_registry as er
from .device_registry import DeviceEntryType
from .entity_platform import EntityPlatform
from .event import async_track_entity_registry_updated_event
from .typing import StateType
_LOGGER = logging.getLogger(__name__)
SLOW_UPDATE_WARNING = 10
DATA_ENTITY_SOURCE = "entity_info"
SOURCE_CONFIG_ENTRY = "config_entry"
SOURCE_PLATFORM_CONFIG = "platform_config"
# Used when converting float states to string: limit precision according to machine
# epsilon to make the string representation readable
FLOAT_PRECISION = abs(int(math.floor(math.log10(abs(sys.float_info.epsilon))))) - 1
@callback
@bind_hass
def entity_sources(hass: HomeAssistant) -> dict[str, dict[str, str]]:
"""Get the entity sources."""
return hass.data.get(DATA_ENTITY_SOURCE, {})
def generate_entity_id(
entity_id_format: str,
name: str | None,
current_ids: list[str] | None = None,
hass: HomeAssistant | None = None,
) -> str:
"""Generate a unique entity ID based on given entity IDs or used IDs."""
return async_generate_entity_id(entity_id_format, name, current_ids, hass)
@callback
def async_generate_entity_id(
entity_id_format: str,
name: str | None,
current_ids: Iterable[str] | None = None,
hass: HomeAssistant | None = None,
) -> str:
"""Generate a unique entity ID based on given entity IDs or used IDs."""
name = (name or DEVICE_DEFAULT_NAME).lower()
preferred_string = entity_id_format.format(slugify(name))
if current_ids is not None:
return ensure_unique_string(preferred_string, current_ids)
if hass is None:
raise ValueError("Missing required parameter current_ids or hass")
test_string = preferred_string
tries = 1
while not hass.states.async_available(test_string):
tries += 1
test_string = f"{preferred_string}_{tries}"
return test_string
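# Hedged usage sketch (illustrative values; `hass` is a running HomeAssistant
# instance):
#
#     entity_id = async_generate_entity_id("sensor.{}", "Outdoor Temperature", hass=hass)
#     # -> "sensor.outdoor_temperature", or "sensor.outdoor_temperature_2" if taken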
def get_capability(hass: HomeAssistant, entity_id: str, capability: str) -> Any | None:
"""Get a capability attribute of an entity.
First try the statemachine, then entity registry.
"""
if state := hass.states.get(entity_id):
return state.attributes.get(capability)
entity_registry = er.async_get(hass)
if not (entry := entity_registry.async_get(entity_id)):
raise HomeAssistantError(f"Unknown entity {entity_id}")
return entry.capabilities.get(capability) if entry.capabilities else None
def get_device_class(hass: HomeAssistant, entity_id: str) -> str | None:
"""Get device class of an entity.
First try the statemachine, then entity registry.
"""
if state := hass.states.get(entity_id):
return state.attributes.get(ATTR_DEVICE_CLASS)
entity_registry = er.async_get(hass)
if not (entry := entity_registry.async_get(entity_id)):
raise HomeAssistantError(f"Unknown entity {entity_id}")
return entry.device_class or entry.original_device_class
def get_supported_features(hass: HomeAssistant, entity_id: str) -> int:
"""Get supported features for an entity.
First try the statemachine, then entity registry.
"""
if state := hass.states.get(entity_id):
return state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
entity_registry = er.async_get(hass)
if not (entry := entity_registry.async_get(entity_id)):
raise HomeAssistantError(f"Unknown entity {entity_id}")
return entry.supported_features or 0
def get_unit_of_measurement(hass: HomeAssistant, entity_id: str) -> str | None:
"""Get unit of measurement class of an entity.
First try the statemachine, then entity registry.
"""
if state := hass.states.get(entity_id):
return state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
entity_registry = er.async_get(hass)
if not (entry := entity_registry.async_get(entity_id)):
raise HomeAssistantError(f"Unknown entity {entity_id}")
return entry.unit_of_measurement
class DeviceInfo(TypedDict, total=False):
"""Entity device information for device registry."""
configuration_url: str | None
connections: set[tuple[str, str]]
default_manufacturer: str
default_model: str
default_name: str
entry_type: DeviceEntryType | None
identifiers: set[tuple[str, str]]
manufacturer: str | None
model: str | None
name: str | None
suggested_area: str | None
sw_version: str | None
hw_version: str | None
via_device: tuple[str, str]
class EntityCategory(StrEnum):
"""Category of an entity.
An entity with a category will:
- Not be exposed to cloud, Alexa, or Google Assistant components
- Not be included in indirect service calls to devices or areas
"""
# Config: An entity which allows changing the configuration of a device
CONFIG = "config"
# Diagnostic: An entity exposing some configuration parameter or diagnostics of a device
DIAGNOSTIC = "diagnostic"
# System: An entity which is not useful for the user to interact with
SYSTEM = "system"
ENTITY_CATEGORIES_SCHEMA: Final = vol.Coerce(EntityCategory)
class EntityPlatformState(Enum):
"""The platform state of an entity."""
# Not Added: Not yet added to a platform, polling updates are written to the state machine
NOT_ADDED = auto()
# Added: Added to a platform, polling updates are written to the state machine
ADDED = auto()
# Removed: Removed from a platform, polling updates are not written to the state machine
REMOVED = auto()
@dataclass
class EntityDescription:
"""A class that describes Home Assistant entities."""
# This is the key identifier for this entity
key: str
device_class: str | None = None
entity_category: EntityCategory | None = None
entity_registry_enabled_default: bool = True
entity_registry_visible_default: bool = True
force_update: bool = False
icon: str | None = None
name: str | None = None
unit_of_measurement: str | None = None
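# Hedged illustration (not part of this module): integrations typically describe
# their entities with EntityDescription instances or domain-specific subclasses;
# the key and values below are hypothetical:
#
#     EntityDescription(
#         key="outdoor_temperature",
#         name="Outdoor temperature",
#         icon="mdi:thermometer",
#         entity_category=EntityCategory.DIAGNOSTIC,
#     )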
class Entity(ABC):
"""An abstract class for Home Assistant entities."""
# SAFE TO OVERWRITE
# The properties and methods here are safe to overwrite when inheriting
# this class. These may be used to customize the behavior of the entity.
entity_id: str = None # type: ignore[assignment]
# Owning hass instance. Will be set by EntityPlatform
# While not purely typed, it makes typehinting more useful for us
# and removes the need for constant None checks or asserts.
hass: HomeAssistant = None # type: ignore[assignment]
# Owning platform instance. Will be set by EntityPlatform
platform: EntityPlatform | None = None
# Entity description instance for this Entity
entity_description: EntityDescription
# If we reported if this entity was slow
_slow_reported = False
# If we reported this entity is updated while disabled
_disabled_reported = False
# If we reported this entity is relying on deprecated temperature conversion
_temperature_reported = False
# Protect for multiple updates
_update_staged = False
# Process updates in parallel
parallel_updates: asyncio.Semaphore | None = None
# Entry in the entity registry
registry_entry: er.RegistryEntry | None = None
# Hold list for functions to call on remove.
_on_remove: list[CALLBACK_TYPE] | None = None
# Context
_context: Context | None = None
_context_set: datetime | None = None
# If entity is added to an entity platform
_platform_state = EntityPlatformState.NOT_ADDED
# Entity Properties
_attr_assumed_state: bool = False
_attr_attribution: str | None = None
_attr_available: bool = True
_attr_context_recent_time: timedelta = timedelta(seconds=5)
_attr_device_class: str | None
_attr_device_info: DeviceInfo | None = None
_attr_entity_category: EntityCategory | None
_attr_entity_picture: str | None = None
_attr_entity_registry_enabled_default: bool
_attr_entity_registry_visible_default: bool
_attr_extra_state_attributes: MutableMapping[str, Any]
_attr_force_update: bool
_attr_icon: str | None
_attr_name: str | None
_attr_should_poll: bool = True
_attr_state: StateType = STATE_UNKNOWN
_attr_supported_features: int | None = None
_attr_unique_id: str | None = None
_attr_unit_of_measurement: str | None
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return self._attr_should_poll
@property
def unique_id(self) -> str | None:
"""Return a unique ID."""
return self._attr_unique_id
@property
def name(self) -> str | None:
"""Return the name of the entity."""
if hasattr(self, "_attr_name"):
return self._attr_name
if hasattr(self, "entity_description"):
return self.entity_description.name
return None
@property
def state(self) -> StateType:
"""Return the state of the entity."""
return self._attr_state
@property
def capability_attributes(self) -> Mapping[str, Any] | None:
"""Return the capability attributes.
Attributes that explain the capabilities of an entity.
Implemented by component base class. Convention for attribute names
is lowercase snake_case.
"""
return None
@property
def state_attributes(self) -> dict[str, Any] | None:
"""Return the state attributes.
Implemented by component base class, should not be extended by integrations.
Convention for attribute names is lowercase snake_case.
"""
return None
@property
def device_state_attributes(self) -> Mapping[str, Any] | None:
"""Return entity specific state attributes.
This method is deprecated, platform classes should implement
extra_state_attributes instead.
"""
return None
@property
def extra_state_attributes(self) -> Mapping[str, Any] | None:
"""Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
if hasattr(self, "_attr_extra_state_attributes"):
return self._attr_extra_state_attributes
return None
@property
def device_info(self) -> DeviceInfo | None:
"""Return device specific attributes.
Implemented by platform classes.
"""
return self._attr_device_info
@property
def device_class(self) -> str | None:
"""Return the class of this device, from component DEVICE_CLASSES."""
if hasattr(self, "_attr_device_class"):
return self._attr_device_class
if hasattr(self, "entity_description"):
return self.entity_description.device_class
return None
@property
def unit_of_measurement(self) -> str | None:
"""Return the unit of measurement of this entity, if any."""
if hasattr(self, "_attr_unit_of_measurement"):
return self._attr_unit_of_measurement
if hasattr(self, "entity_description"):
return self.entity_description.unit_of_measurement
return None
@property
def icon(self) -> str | None:
"""Return the icon to use in the frontend, if any."""
if hasattr(self, "_attr_icon"):
return self._attr_icon
if hasattr(self, "entity_description"):
return self.entity_description.icon
return None
@property
def entity_picture(self) -> str | None:
"""Return the entity picture to use in the frontend, if any."""
return self._attr_entity_picture
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._attr_available
@property
def assumed_state(self) -> bool:
"""Return True if unable to access real state of the entity."""
return self._attr_assumed_state
@property
def force_update(self) -> bool:
"""Return True if state updates should be forced.
If True, a state change will be triggered anytime the state property is
updated, not just when the value changes.
"""
if hasattr(self, "_attr_force_update"):
return self._attr_force_update
if hasattr(self, "entity_description"):
return self.entity_description.force_update
return False
@property
def supported_features(self) -> int | None:
"""Flag supported features."""
return self._attr_supported_features
@property
def context_recent_time(self) -> timedelta:
"""Time that a context is considered recent."""
return self._attr_context_recent_time
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
if hasattr(self, "_attr_entity_registry_enabled_default"):
return self._attr_entity_registry_enabled_default
if hasattr(self, "entity_description"):
return self.entity_description.entity_registry_enabled_default
return True
@property
def entity_registry_visible_default(self) -> bool:
"""Return if the entity should be visible when first added to the entity registry."""
if hasattr(self, "_attr_entity_registry_visible_default"):
return self._attr_entity_registry_visible_default
if hasattr(self, "entity_description"):
return self.entity_description.entity_registry_visible_default
return True
@property
def attribution(self) -> str | None:
"""Return the attribution."""
return self._attr_attribution
@property
def entity_category(self) -> EntityCategory | None:
"""Return the category of the entity, if any."""
if hasattr(self, "_attr_entity_category"):
return self._attr_entity_category
if hasattr(self, "entity_description"):
return self.entity_description.entity_category
return None
# DO NOT OVERWRITE
# These properties and methods are either managed by Home Assistant or they
# are used to perform a very specific function. Overwriting these may
# produce undesirable effects in the entity's operation.
@property
def enabled(self) -> bool:
"""Return if the entity is enabled in the entity registry.
If an entity is not part of the registry, it cannot be disabled
and will therefore always be enabled.
"""
return self.registry_entry is None or not self.registry_entry.disabled
@callback
def async_set_context(self, context: Context) -> None:
"""Set the context the entity currently operates under."""
self._context = context
self._context_set = dt_util.utcnow()
async def async_update_ha_state(self, force_refresh: bool = False) -> None:
"""Update Home Assistant with current state of entity.
If force_refresh == True will update entity before setting state.
This method must be run in the event loop.
"""
if self.hass is None:
raise RuntimeError(f"Attribute hass is None for {self}")
if self.entity_id is None:
raise NoEntitySpecifiedError(
f"No entity id specified for entity {self.name}"
)
# update entity data
if force_refresh:
try:
await self.async_device_update()
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Update for %s fails", self.entity_id)
return
self._async_write_ha_state()
@callback
def async_write_ha_state(self) -> None:
"""Write the state to the state machine."""
if self.hass is None:
raise RuntimeError(f"Attribute hass is None for {self}")
if self.entity_id is None:
raise NoEntitySpecifiedError(
f"No entity id specified for entity {self.name}"
)
self._async_write_ha_state()
def _stringify_state(self, available: bool) -> str:
"""Convert state to string."""
if not available:
return STATE_UNAVAILABLE
if (state := self.state) is None:
return STATE_UNKNOWN
if isinstance(state, float):
# If the entity's state is a float, limit precision according to machine
# epsilon to make the string representation readable
return f"{state:.{FLOAT_PRECISION}}"
return str(state)
@callback
def _async_write_ha_state(self) -> None:
"""Write the state to the state machine."""
if self._platform_state == EntityPlatformState.REMOVED:
# Polling returned after the entity has already been removed
return
if self.registry_entry and self.registry_entry.disabled_by:
if not self._disabled_reported:
self._disabled_reported = True
assert self.platform is not None
_LOGGER.warning(
"Entity %s is incorrectly being triggered for updates while it is disabled. This is a bug in the %s integration",
self.entity_id,
self.platform.platform_name,
)
return
start = timer()
attr = self.capability_attributes
attr = dict(attr) if attr else {}
available = self.available # only call self.available once per update cycle
state = self._stringify_state(available)
if available:
attr.update(self.state_attributes or {})
attr.update(self.extra_state_attributes or {})
if (unit_of_measurement := self.unit_of_measurement) is not None:
attr[ATTR_UNIT_OF_MEASUREMENT] = unit_of_measurement
entry = self.registry_entry
if assumed_state := self.assumed_state:
attr[ATTR_ASSUMED_STATE] = assumed_state
if (attribution := self.attribution) is not None:
attr[ATTR_ATTRIBUTION] = attribution
if (
device_class := (entry and entry.device_class) or self.device_class
) is not None:
attr[ATTR_DEVICE_CLASS] = str(device_class)
if (entity_picture := self.entity_picture) is not None:
attr[ATTR_ENTITY_PICTURE] = entity_picture
if (icon := (entry and entry.icon) or self.icon) is not None:
attr[ATTR_ICON] = icon
if (name := (entry and entry.name) or self.name) is not None:
attr[ATTR_FRIENDLY_NAME] = name
if (supported_features := self.supported_features) is not None:
attr[ATTR_SUPPORTED_FEATURES] = supported_features
end = timer()
if end - start > 0.4 and not self._slow_reported:
self._slow_reported = True
report_issue = self._suggest_report_issue()
_LOGGER.warning(
"Updating state for %s (%s) took %.3f seconds. Please %s",
self.entity_id,
type(self),
end - start,
report_issue,
)
# Overwrite properties that have been set in the config file.
if DATA_CUSTOMIZE in self.hass.data:
attr.update(self.hass.data[DATA_CUSTOMIZE].get(self.entity_id))
def _convert_temperature(state: str, attr: dict) -> str:
# Convert temperature if we detect one
# pylint: disable-next=import-outside-toplevel
from homeassistant.components.sensor import SensorEntity
unit_of_measure = attr.get(ATTR_UNIT_OF_MEASUREMENT)
units = self.hass.config.units
if unit_of_measure == units.temperature_unit or unit_of_measure not in (
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
):
return state
domain = split_entity_id(self.entity_id)[0]
if domain != "sensor":
if not self._temperature_reported:
self._temperature_reported = True
report_issue = self._suggest_report_issue()
_LOGGER.warning(
"Entity %s (%s) relies on automatic temperature conversion, this will "
"be unsupported in Home Assistant Core 2022.7. Please %s",
self.entity_id,
type(self),
report_issue,
)
elif not isinstance(self, SensorEntity):
if not self._temperature_reported:
self._temperature_reported = True
report_issue = self._suggest_report_issue()
_LOGGER.warning(
"Temperature sensor %s (%s) does not inherit SensorEntity, "
"this will be unsupported in Home Assistant Core 2022.7."
"Please %s",
self.entity_id,
type(self),
report_issue,
)
else:
return state
try:
prec = len(state) - state.index(".") - 1 if "." in state else 0
temp = units.temperature(float(state), unit_of_measure)
state = str(round(temp) if prec == 0 else round(temp, prec))
attr[ATTR_UNIT_OF_MEASUREMENT] = units.temperature_unit
except ValueError:
# Could not convert state to float
pass
return state
state = _convert_temperature(state, attr)
if (
self._context_set is not None
and dt_util.utcnow() - self._context_set > self.context_recent_time
):
self._context = None
self._context_set = None
self.hass.states.async_set(
self.entity_id, state, attr, self.force_update, self._context
)
def schedule_update_ha_state(self, force_refresh: bool = False) -> None:
"""Schedule an update ha state change task.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed.
"""
self.hass.add_job(self.async_update_ha_state(force_refresh))
@callback
def async_schedule_update_ha_state(self, force_refresh: bool = False) -> None:
"""Schedule an update ha state change task.
This method must be run in the event loop.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed.
"""
if force_refresh:
self.hass.async_create_task(self.async_update_ha_state(force_refresh))
else:
self.async_write_ha_state()
async def async_device_update(self, warning: bool = True) -> None:
"""Process 'update' or 'async_update' from entity.
This method is a coroutine.
"""
if self._update_staged:
return
self._update_staged = True
# Process update sequential
if self.parallel_updates:
await self.parallel_updates.acquire()
try:
task: asyncio.Future[None]
if hasattr(self, "async_update"):
task = self.hass.async_create_task(self.async_update()) # type: ignore[attr-defined]
elif hasattr(self, "update"):
task = self.hass.async_add_executor_job(self.update) # type: ignore[attr-defined]
else:
return
if not warning:
await task
return
finished, _ = await asyncio.wait([task], timeout=SLOW_UPDATE_WARNING)
for done in finished:
if exc := done.exception():
raise exc
return
_LOGGER.warning(
"Update of %s is taking over %s seconds",
self.entity_id,
SLOW_UPDATE_WARNING,
)
await task
finally:
self._update_staged = False
if self.parallel_updates:
self.parallel_updates.release()
@callback
def async_on_remove(self, func: CALLBACK_TYPE) -> None:
"""Add a function to call when entity is removed or not added."""
if self._on_remove is None:
self._on_remove = []
self._on_remove.append(func)
async def async_removed_from_registry(self) -> None:
"""Run when entity has been removed from entity registry.
To be extended by integrations.
"""
@callback
def add_to_platform_start(
self,
hass: HomeAssistant,
platform: EntityPlatform,
parallel_updates: asyncio.Semaphore | None,
) -> None:
"""Start adding an entity to a platform."""
if self._platform_state == EntityPlatformState.ADDED:
raise HomeAssistantError(
f"Entity {self.entity_id} cannot be added a second time to an entity platform"
)
self.hass = hass
self.platform = platform
self.parallel_updates = parallel_updates
self._platform_state = EntityPlatformState.ADDED
def _call_on_remove_callbacks(self) -> None:
"""Call callbacks registered by async_on_remove."""
if self._on_remove is None:
return
while self._on_remove:
self._on_remove.pop()()
@callback
def add_to_platform_abort(self) -> None:
"""Abort adding an entity to a platform."""
self._platform_state = EntityPlatformState.NOT_ADDED
self._call_on_remove_callbacks()
self.hass = None # type: ignore[assignment]
self.platform = None
self.parallel_updates = None
async def add_to_platform_finish(self) -> None:
"""Finish adding an entity to a platform."""
await self.async_internal_added_to_hass()
await self.async_added_to_hass()
self.async_write_ha_state()
async def async_remove(self, *, force_remove: bool = False) -> None:
"""Remove entity from Home Assistant.
If the entity has a non disabled entry in the entity registry,
the entity's state will be set to unavailable, in the same way
as when the entity registry is loaded.
If the entity doesn't have a non disabled entry in the entity registry,
or if force_remove=True, its state will be removed.
"""
if self.platform and self._platform_state != EntityPlatformState.ADDED:
raise HomeAssistantError(
f"Entity {self.entity_id} async_remove called twice"
)
self._platform_state = EntityPlatformState.REMOVED
self._call_on_remove_callbacks()
await self.async_internal_will_remove_from_hass()
await self.async_will_remove_from_hass()
# Check if entry still exists in entity registry (e.g. unloading config entry)
if (
not force_remove
and self.registry_entry
and not self.registry_entry.disabled
):
            # Set the entity's state to unavailable + ATTR_RESTORED: True
self.registry_entry.write_unavailable_state(self.hass)
else:
self.hass.states.async_remove(self.entity_id, context=self._context)
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass.
To be extended by integrations.
"""
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass.
To be extended by integrations.
"""
@callback
def async_registry_entry_updated(self) -> None:
"""Run when the entity registry entry has been updated.
To be extended by integrations.
"""
async def async_internal_added_to_hass(self) -> None:
"""Run when entity about to be added to hass.
Not to be extended by integrations.
"""
if self.platform:
info = {
"domain": self.platform.platform_name,
"custom_component": "custom_components" in type(self).__module__,
}
if self.platform.config_entry:
info["source"] = SOURCE_CONFIG_ENTRY
info["config_entry"] = self.platform.config_entry.entry_id
else:
info["source"] = SOURCE_PLATFORM_CONFIG
self.hass.data.setdefault(DATA_ENTITY_SOURCE, {})[self.entity_id] = info
if self.registry_entry is not None:
# This is an assert as it should never happen, but helps in tests
assert (
not self.registry_entry.disabled_by
), f"Entity {self.entity_id} is being added while it's disabled"
self.async_on_remove(
async_track_entity_registry_updated_event(
self.hass, self.entity_id, self._async_registry_updated
)
)
async def async_internal_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass.
Not to be extended by integrations.
"""
if self.platform:
self.hass.data[DATA_ENTITY_SOURCE].pop(self.entity_id)
async def _async_registry_updated(self, event: Event) -> None:
"""Handle entity registry update."""
data = event.data
if data["action"] == "remove":
await self.async_removed_from_registry()
self.registry_entry = None
await self.async_remove()
if data["action"] != "update":
return
ent_reg = er.async_get(self.hass)
old = self.registry_entry
self.registry_entry = ent_reg.async_get(data["entity_id"])
assert self.registry_entry is not None
if self.registry_entry.disabled:
await self.async_remove()
return
assert old is not None
if self.registry_entry.entity_id == old.entity_id:
self.async_registry_entry_updated()
self.async_write_ha_state()
return
await self.async_remove(force_remove=True)
assert self.platform is not None
self.entity_id = self.registry_entry.entity_id
await self.platform.async_add_entities([self])
def __eq__(self, other: Any) -> bool:
"""Return the comparison."""
if not isinstance(other, self.__class__):
return False
# Can only decide equality if both have a unique id
if self.unique_id is None or other.unique_id is None:
return False
# Ensure they belong to the same platform
if self.platform is not None or other.platform is not None:
if self.platform is None or other.platform is None:
return False
if self.platform.platform != other.platform.platform:
return False
return self.unique_id == other.unique_id
def __repr__(self) -> str:
"""Return the representation."""
return f"<Entity {self.name}: {self.state}>"
async def async_request_call(self, coro: Awaitable) -> None:
"""Process request batched."""
if self.parallel_updates:
await self.parallel_updates.acquire()
try:
await coro
finally:
if self.parallel_updates:
self.parallel_updates.release()
def _suggest_report_issue(self) -> str:
"""Suggest to report an issue."""
report_issue = ""
if "custom_components" in type(self).__module__:
report_issue = "report it to the custom component author."
else:
report_issue = (
"create a bug report at "
"https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue"
)
if self.platform:
report_issue += (
f"+label%3A%22integration%3A+{self.platform.platform_name}%22"
)
return report_issue
@dataclass
class ToggleEntityDescription(EntityDescription):
"""A class that describes toggle entities."""
class ToggleEntity(Entity):
"""An abstract class for entities that can be turned on and off."""
entity_description: ToggleEntityDescription
_attr_is_on: bool | None = None
_attr_state: None = None
@property
@final
def state(self) -> Literal["on", "off"] | None:
"""Return the state."""
if (is_on := self.is_on) is None:
return None
return STATE_ON if is_on else STATE_OFF
@property
def is_on(self) -> bool | None:
"""Return True if entity is on."""
return self._attr_is_on
def turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
raise NotImplementedError()
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
await self.hass.async_add_executor_job(ft.partial(self.turn_on, **kwargs))
def turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
raise NotImplementedError()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
await self.hass.async_add_executor_job(ft.partial(self.turn_off, **kwargs))
def toggle(self, **kwargs: Any) -> None:
"""Toggle the entity."""
if self.is_on:
self.turn_off(**kwargs)
else:
self.turn_on(**kwargs)
async def async_toggle(self, **kwargs: Any) -> None:
"""Toggle the entity."""
if self.is_on:
await self.async_turn_off(**kwargs)
else:
await self.async_turn_on(**kwargs)
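# A minimal, illustrative sketch (not part of Home Assistant's actual API surface;
# the class name below is made up): integrations build on ToggleEntity by flipping
# _attr_is_on inside their turn handlers and writing the new state.
class _SketchSwitch(ToggleEntity):
    """Illustrative in-memory switch built on ToggleEntity."""

    _attr_is_on = False

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn the sketch switch on."""
        self._attr_is_on = True
        self.async_write_ha_state()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn the sketch switch off."""
        self._attr_is_on = False
        self.async_write_ha_state()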
| 34.177203 | 133 | 0.642247 | [
"Apache-2.0"
] | algra4/core | homeassistant/helpers/entity.py | 35,681 | Python |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class direction_out(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/router/router-bgp/address-family/ipv4/ipv4-unicast/default-vrf/neighbor/af-ipv4-neighbor-peergroup-holder/af-ipv4-neighbor-peergroup/prefix-list/direction-out. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__prefix_list_direction_out_prefix_name','__prefix_list_direction_out',)
_yang_name = 'direction-out'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__prefix_list_direction_out = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="prefix-list-direction-out", rest_name="out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Filter outgoing routes', u'alt-name': u'out'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
self.__prefix_list_direction_out_prefix_name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="prefix-list-direction-out-prefix-name", rest_name="ip-access-number", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'ip-access-number', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='nei-prefix-list-filter', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'router', u'router-bgp', u'address-family', u'ipv4', u'ipv4-unicast', u'default-vrf', u'neighbor', u'af-ipv4-neighbor-peergroup-holder', u'af-ipv4-neighbor-peergroup', u'prefix-list', u'direction-out']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'router', u'bgp', u'address-family', u'ipv4', u'unicast', u'neighbor', u'af-ipv4-neighbor-peergroup', u'prefix-list']
def _get_prefix_list_direction_out_prefix_name(self):
"""
Getter method for prefix_list_direction_out_prefix_name, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out_prefix_name (nei-prefix-list-filter)
"""
return self.__prefix_list_direction_out_prefix_name
def _set_prefix_list_direction_out_prefix_name(self, v, load=False):
"""
Setter method for prefix_list_direction_out_prefix_name, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out_prefix_name (nei-prefix-list-filter)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix_list_direction_out_prefix_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix_list_direction_out_prefix_name() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="prefix-list-direction-out-prefix-name", rest_name="ip-access-number", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'ip-access-number', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='nei-prefix-list-filter', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """prefix_list_direction_out_prefix_name must be of a type compatible with nei-prefix-list-filter""",
'defined-type': "brocade-bgp:nei-prefix-list-filter",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="prefix-list-direction-out-prefix-name", rest_name="ip-access-number", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'ip-access-number', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='nei-prefix-list-filter', is_config=True)""",
})
self.__prefix_list_direction_out_prefix_name = t
if hasattr(self, '_set'):
self._set()
def _unset_prefix_list_direction_out_prefix_name(self):
self.__prefix_list_direction_out_prefix_name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="prefix-list-direction-out-prefix-name", rest_name="ip-access-number", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'ip-access-number', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='nei-prefix-list-filter', is_config=True)
def _get_prefix_list_direction_out(self):
"""
Getter method for prefix_list_direction_out, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out (empty)
"""
return self.__prefix_list_direction_out
def _set_prefix_list_direction_out(self, v, load=False):
"""
Setter method for prefix_list_direction_out, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/prefix_list_direction_out (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix_list_direction_out is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix_list_direction_out() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="prefix-list-direction-out", rest_name="out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Filter outgoing routes', u'alt-name': u'out'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """prefix_list_direction_out must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="prefix-list-direction-out", rest_name="out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Filter outgoing routes', u'alt-name': u'out'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)""",
})
self.__prefix_list_direction_out = t
if hasattr(self, '_set'):
self._set()
def _unset_prefix_list_direction_out(self):
self.__prefix_list_direction_out = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="prefix-list-direction-out", rest_name="out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Filter outgoing routes', u'alt-name': u'out'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
prefix_list_direction_out_prefix_name = __builtin__.property(_get_prefix_list_direction_out_prefix_name, _set_prefix_list_direction_out_prefix_name)
prefix_list_direction_out = __builtin__.property(_get_prefix_list_direction_out, _set_prefix_list_direction_out)
_pyangbind_elements = {'prefix_list_direction_out_prefix_name': prefix_list_direction_out_prefix_name, 'prefix_list_direction_out': prefix_list_direction_out, }
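# A brief usage sketch (hypothetical values; Python 2, matching the generated code
# above): backends populate the leaves through the private setters described in the
# docstrings, e.g.
#   obj = direction_out()
#   obj._set_prefix_list_direction_out_prefix_name(u'my-prefix-list')
#   obj._set_prefix_list_direction_out(True)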
| 69.734177 | 623 | 0.754674 | [
"Apache-2.0"
] | extremenetworks/pybind | pybind/slxos/v17s_1_02/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/neighbor/af_ipv4_neighbor_peergroup_holder/af_ipv4_neighbor_peergroup/prefix_list/direction_out/__init__.py | 11,018 | Python |
import os
import pytest
import radical.utils as ru
import radical.pilot as rp
import radical.pilot.constants as rpc
from radical.pilot.agent.scheduler.hombre import Hombre
try:
import mock
except ImportError:
from unittest import mock
# ------------------------------------------------------------------------------
# User Input for test
resource_name = 'local.localhost'
access_schema = 'ssh'
# Sample data to be staged -- available in cwd
cur_dir = os.path.dirname(os.path.abspath(__file__))
# ------------------------------------------------------------------------------
# Setup for every test
def setUp():
session = rp.Session()
config = {'lrms_info' : {'lm_info' : 'INFO',
'n_nodes' : 2,
'cores_per_node' : 4,
'gpus_per_node' : 2,
'node_list' : [['0', 0], ['1', 1]]}}
return config, session
# ------------------------------------------------------------------------------
#
def cud_nonmpi():
return {'cpu_process_type' : None,
'cpu_thread_type' : None,
'cpu_processes' : 1,
'cpu_threads' : 2,
'gpu_process_type' : None,
'gpu_thread_type' : None,
'gpu_processes' : 1,
'gpu_threads' : 1}
# ------------------------------------------------------------------------------
#
def cud_mpi():
return {'cpu_process_type' : rpc.MPI,
'cpu_thread_type' : None,
'cpu_processes' : 3,
'cpu_threads' : 1,
'gpu_process_type' : rpc.MPI,
'gpu_thread_type' : None,
'gpu_processes' : 1,
'gpu_threads' : 1}
# ------------------------------------------------------------------------------
# Cleanup any folders and files to leave the system state
# as prior to the test
def tearDown(session):
session.close()
# ------------------------------------------------------------------------------
# Test non mpi tasks
@mock.patch.object(Hombre, '__init__', return_value=None)
@mock.patch.object(Hombre, 'advance')
@mock.patch.object(ru.Profiler, 'prof')
@mock.patch('radical.utils.raise_on')
def test_nonmpi_task_withhombre_scheduler(mocked_init,
mocked_method,
mocked_profiler,
mocked_raise_on):
cfg, session = setUp()
component = Hombre(cfg=dict(), session=session)
component._log = ru.Logger('radical.pilot.test')
component._configured = False
component._cfg = cfg
component._lrms_info = cfg['lrms_info']
component._lrms_lm_info = cfg['lrms_info']['lm_info']
component._lrms_n_nodes = cfg['lrms_info']['n_nodes']
component._lrms_node_list = cfg['lrms_info']['node_list']
component._lrms_cores_per_node = cfg['lrms_info']['cores_per_node']
component._lrms_gpus_per_node = cfg['lrms_info']['gpus_per_node']
component.nodes = list()
for node in component._lrms_node_list:
component.nodes.append({'uid' : node[0],
'name' : node[1]})
# populate component attributes
component._configure()
component._oversubscribe = False
# we expect these slots to be available
all_slots = list()
for n in range(component._lrms_n_nodes):
all_slots.append({'lm_info' : 'INFO',
'cores_per_node' : 4,
'gpus_per_node' : 2,
'ncblocks' : 1,
'ngblocks' : 1,
'nodes' : [{'name': n,
'uid' : str(n),
'core_map' : [[0, 1]],
'gpu_map' : []},
{'name': n,
'uid' : str(n),
'core_map' : [[0]],
'gpu_map' : [[0]]}
]
})
all_slots.append({'lm_info' : 'INFO',
'cores_per_node' : 4,
'gpus_per_node' : 2,
'ncblocks' : 1,
'ngblocks' : 1,
'nodes' : [{'name': n,
'uid' : str(n),
'core_map' : [[2, 3]],
'gpu_map' : []},
{'name': n,
'uid' : str(n),
'core_map' : [[0]],
'gpu_map' : [[1]]}
]
})
# Allocate first TD -- should land on second node
td = cud_nonmpi()
slot = component._allocate_slot(td)
chk = all_slots[-1]
assert(slot == chk)
# Allocate second TD -- should also land on second node
td = cud_nonmpi()
slot = component._allocate_slot(td)
chk = all_slots[-2]
assert(slot == chk)
# Allocate third TD -- should land on first node
td = cud_nonmpi()
slot = component._allocate_slot(td)
chk = all_slots[-3]
assert(slot == chk)
# Allocate fourth TD -- should also land on the first node
td = cud_nonmpi()
slot = component._allocate_slot(td)
assert slot == all_slots[-4]
# Fail with ValueError if heterogeneous CUs are scheduled
with pytest.raises(ValueError):
td = cud_nonmpi()
td['gpu_processes'] = 2
slot = component._allocate_slot(td)
# expect no slots now, as all resources are used
td = cud_nonmpi()
noslot = component._allocate_slot(td)
assert(noslot is None)
# Deallocate last filled slot
component._release_slot(slot)
# we should get a new slot now, which is the same as the one just freed
td = cud_nonmpi()
newslot = component._allocate_slot(td)
assert(newslot == slot)
tearDown(session)
# ------------------------------------------------------------------------------
# Test mpi tasks
@mock.patch.object(Hombre, '__init__', return_value=None)
@mock.patch.object(Hombre, 'advance')
@mock.patch.object(ru.Profiler, 'prof')
@mock.patch('radical.utils.raise_on')
def test_mpi_task_withhombre_scheduler(mocked_init,
mocked_method,
mocked_profiler,
mocked_raise_on):
cfg, session = setUp()
component = Hombre(cfg=dict(), session=session)
component._log = ru.Logger('radical.pilot.test')
component._configured = False
component._cfg = cfg
component._lrms_info = cfg['lrms_info']
component._lrms_lm_info = cfg['lrms_info']['lm_info']
component._lrms_n_nodes = cfg['lrms_info']['n_nodes']
component._lrms_node_list = cfg['lrms_info']['node_list']
component._lrms_cores_per_node = cfg['lrms_info']['cores_per_node']
component._lrms_gpus_per_node = cfg['lrms_info']['gpus_per_node']
component.nodes = list()
for node in component._lrms_node_list:
component.nodes.append({'uid' : node[0],
'name' : node[1]})
# populate component attributes
component._configure()
component._oversubscribe = True
# we expect these slots to be available
all_slots = [{
'lm_info' : 'INFO',
'cores_per_node' : 4,
'gpus_per_node' : 2,
'nodes' : [[0, '0', [[0], [1], [2]], [[0]]]]
},
{
'lm_info' : 'INFO',
'cores_per_node' : 4,
'gpus_per_node' : 2,
'nodes' : [[1, '1', [[0], [1], [2]], [[0]]]]
}]
# Allocate first TD -- should land on second node
td = cud_mpi()
slot = component._allocate_slot(td)
chk = all_slots[-1]
assert(slot == chk)
# Allocate second TD -- should land on first node
td = cud_mpi()
slot = component._allocate_slot(td)
assert slot == all_slots[-2]
# Fail with ValueError if heterogeneous CUs are scheduled
with pytest.raises(ValueError):
td = cud_mpi()
td['gpu_processes'] = 2
slot = component._allocate_slot(td)
# expect no slots now, as all resources are used
td = cud_mpi()
noslot = component._allocate_slot(td)
assert(noslot is None)
# Deallocate last filled slot
component._release_slot(slot)
# we should get a new slot now, which is the same as the one just freed
td = cud_mpi()
newslot = component._allocate_slot(td)
assert(newslot == slot)
tearDown(session)
# ------------------------------------------------------------------------------
| 33.906475 | 80 | 0.462232 | [
"MIT"
] | eirrgang/radical.pilot | old_tests/test_hombre_scheduler.py | 9,426 | Python |
import pytest
from app.models.user import User
from datetime import date
@pytest.fixture(scope='module')
def new_user():
user = User('test', date(day=1, month=12, year=1989))
return user
def test_new_user(new_user):
"""
GIVEN a User model
WHEN a new User is created
THEN check the username and birthday fields are defined correctly
"""
assert new_user.name == 'test'
assert new_user.birthday == date(day=1, month=12, year=1989)
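# A small follow-on check in the same style (a sketch; it relies only on the
# attributes exercised above and on the module-scoped fixture being reused):
def test_new_user_types(new_user):
    """
    GIVEN a User model
    WHEN a new User is created
    THEN check the attribute types are what the fixture supplied
    """
    assert isinstance(new_user.name, str)
    assert isinstance(new_user.birthday, date)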
| 22.333333 | 69 | 0.690832 | [
"CC0-1.0"
] | atsikham/flask-test-app | tests/unit/test_app.py | 469 | Python |
class BitVector:
"""
This class uses an int called dec_rep as a vector of self.len many bits,
where self.len <= self.max_len. The class wraps some common bitwise
operations, and some less common ones too (like Gray coding that is
needed by Qubiter). In some cases, the bitwise manipulation might be
more succinct than the corresponding function in this wrapper, but the
wrapper function's name spells out in words what is wanted.
Attributes
----------
dec_rep : int
decimal representation, the int whose binary representation carries
a bit vector of length self.len
len : int
the length of the bit vector
max_len : int
maximum self.len allowed
"""
def __init__(self, length, dec_rep):
"""
Constructor
Parameters
----------
length : int
dec_rep : int
Returns
-------
"""
self.len = length
self.dec_rep = dec_rep
self.max_len = 16
assert length <= self.max_len, "bit vector is too long"
assert length > 0, "bit vector len must be >=1"
@staticmethod
def copy(bvec):
"""
Copy constructor, returns a new BitVector which is a copy of the
BitVector bvec.
Parameters
----------
bvec : BitVector
Returns
-------
BitVector
"""
return BitVector(bvec.len, bvec.dec_rep)
def bit_is_T(self, bpos):
"""
Returns True iff bit at position bpos is 1 (True)
Parameters
----------
bpos : int
bit position
Returns
-------
bool
"""
assert bpos < self.len, "bit position is too large"
mask = (1 << bpos)
return (self.dec_rep & mask) == mask
def set_bit_T(self, bpos):
"""
Sets to 1 (True) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep |= (1 << bpos)
def set_bit_F(self, bpos):
"""
Sets to 0 (False) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep &= ~(1 << bpos)
def set_all_bits_T(self):
"""
Sets to 1 (True) the bits of self at position bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
# example: len = 3, dec_rep becomes 7 = 0b111
self.dec_rep = (1 << self.len) - 1
def set_all_bits_F(self):
"""
Sets to 0 (False) the bits of self at positions bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
self.dec_rep = 0
def get_num_T_bits(self):
"""
Returns the number of 1 (True) bits at positions bpos
from 0 to len-1 inclusive.
Returns
-------
int
"""
count = 0
for bpos in range(self.len):
if self.bit_is_T(bpos):
count += 1
return count
def find_T_bit_to_right_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the right of
position bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos <= 0:
return -1
right_T_bit = bpos
mask = (1 << right_T_bit)
found_it = False
while True:
right_T_bit -= 1
mask >>= 1
found_it = ((self.dec_rep & mask) == mask)
if right_T_bit == 0 or found_it:
break
if found_it:
return right_T_bit
else:
return -1
def find_T_bit_to_left_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the left of position
bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos >= self.len-1:
return -1
left_T_bit = bpos
mask = (1 << left_T_bit)
found_it = False
while True:
left_T_bit += 1
mask <<= 1
found_it = ((self.dec_rep & mask) == mask)
if left_T_bit == self.len-1 or found_it:
break
if found_it:
return left_T_bit
else:
return -1
def find_leftmost_T_bit(self):
"""
Out of all 1 (True) bits, returns the position of the leftmost one.
Returns -1 if there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(self.len-1):
return self.len-1
else:
return self.find_T_bit_to_right_of(self.len - 1)
def find_rightmost_T_bit(self):
"""
Out of all 1 (True) bits, returns the position of the rightmost one.
Returns -1 if there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(0):
return 0
else:
return self.find_T_bit_to_left_of(0)
def get_bit_string(self):
"""
Returns self represented as string of length self.len of ones and
zeros. If bit_str is the output, [int(x) for x in bit_str] will turn
result to list of ints.
Returns
-------
str
"""
bit_str = ''
for beta in range(self.len-1, -1, -1):
if self.bit_is_T(beta):
bit_str += '1'
else:
bit_str += '0'
return bit_str
def __str__(self):
"""
Readable representation of self
Returns
-------
str
"""
return self.get_bit_string() + '=' + str(self.dec_rep)
@staticmethod
def new_with_T_on_diff(bvec1, bvec2):
"""
Given two BitVectors bvec1 and bvec2, this returns a BitVector which
is a bitwise xor (mod 2 sum) of the bits of bvec1 and bvec2.
Parameters
----------
bvec1 : BitVector
bvec2 : BitVector
Returns
-------
BitVector
"""
assert bvec1.len == bvec2.len
return BitVector(bvec1.len, bvec1.dec_rep ^ bvec2.dec_rep)
@staticmethod
def get_lazy_from_normal(bit_len, normal):
"""
Throughout Qubiter, we will often refer to "Gray Code" as "lazy
ordering". In lazy ordering with bit_len many bits, one gives a
sequence of bit vectors of length bit_len, so that two adjacent
items of the sequence differ by just one bit. For example 000=0,
100=4, 110=6, 010=2, 011=3, 111=7, 101=5, 001=1. Each element of
this sequence represented as an int will be called lazy, and each
int in the sequence 0, 1, 2, 3, 4, 5, 6, 7 will be called normal.
Normal ordering is usually called dictionary ordering. Normal and
lazy sequences both start at 0.
Suppose bit_len = 3. The lazy sequence 000, 100, 110, 010, 011, 111,
101, 001 is easily obtained from the "standard" lazy sequence 000,
001, 011, 010, 110, 111, 101, 100 by "reflecting" each sequence
term. We will use the second sequence because it is more common in
the literature.
References
----------
1. Martin Gardner, "Knotted Doughnuts and Other
Mathematical Entertainments", chapt. 2, "The Binary Gray Code"
2. "Numerical Recipes in C"
3. Many books on Discrete Mathematics for CompSci types
4. On the web, in Eric's Treasure Trove/Math/Gray Codes
Parameters
----------
bit_len : int
normal : int
Function returns the lazy int that corresponds to this normal int.
Returns
-------
int
"""
lazy_bvec = BitVector(bit_len, normal)
lazy = lazy_bvec.dec_rep
if bit_len > 1:
for m in range(bit_len-2, -1, -1):
# Look at bpos = m+1, if it's ON, then flip bpos=m.
# Remember that ^ is same as a mod2 sum.
lazy ^= (((normal >> m+1) & 1) << m)
return lazy
@staticmethod
def lazy_advance(old_normal, old_lazy):
"""
This method takes int "old_lazy" (which corresponds to bit vector
"old_normal"), and changes it to the next lazy int, "new_lazy" (
which corresponds to "new_normal").
example:
lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100
old_lazy = 011
old_normal = 2 = 010
new_normal = 3 = 011
mask = (new_normal & ~old_normal) = 011 & 101 = 001
new_lazy = old_lazy ^ mask = 011 ^ 001 = 010
Parameters
----------
old_normal : int
old_lazy : int
Returns
-------
int, int
"""
new_normal = old_normal + 1
new_lazy = old_lazy ^ (new_normal & ~(new_normal-1))
return new_normal, new_lazy
if __name__ == "__main__":
def main():
for length in [3, 4]:
print('\nlength=', length)
print('normal, lazy, lazy in binary:')
max_normal = (1 << length) - 1
normal = 0
lazy = 0
while normal <= max_normal:
lazy_bvec = BitVector(length, lazy)
print(normal, lazy, BitVector.get_bit_string(lazy_bvec))
normal, lazy = BitVector.lazy_advance(normal, lazy)
main()
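# A small self-check sketch (defined but not called anywhere above): it uses the
# class as-is to confirm that consecutive "lazy" (Gray-code) integers differ in
# exactly one bit.
def _check_lazy_sequence(bit_len=3):
    normal, lazy = 0, 0
    prev = None
    for _ in range(1 << bit_len):
        cur = BitVector(bit_len, lazy)
        if prev is not None:
            # adjacent Gray-code terms must differ in exactly one bit
            assert BitVector.new_with_T_on_diff(prev, cur).get_num_T_bits() == 1
        prev = cur
        normal, lazy = BitVector.lazy_advance(normal, lazy)
    return True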
| 25.684478 | 78 | 0.521795 | [
"Apache-2.0"
] | yourball/qubiter | qubiter/BitVector.py | 10,094 | Python |
import os
import os.path as osp
import pickle
import random
from collections import deque
from datetime import datetime
import gym
import numpy as np
import scipy.stats as stats
import torch
import torch.optim as optim
from mpi4py import MPI
import dr
from dr.ppo.models import Policy, ValueNet
from dr.ppo.train import one_train_iter
from dr.ppo.utils import set_torch_num_threads, RunningMeanStd, traj_seg_gen
COMM = MPI.COMM_WORLD
import tensorboardX
def set_global_seeds(i):
torch.manual_seed(i)
np.random.seed(i)
random.seed(i)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(i)
class CEMOptimizer(object):
def __init__(self, sol_dim, max_iters, popsize, num_elites, cost_function,
upper_bound=None, lower_bound=None, epsilon=0.001, alpha=0.25, viz_dir=None):
"""Creates an instance of this class.
Arguments:
sol_dim (int): The dimensionality of the problem space
max_iters (int): The maximum number of iterations to perform during optimization
popsize (int): The number of candidate solutions to be sampled at every iteration
num_elites (int): The number of top solutions that will be used to obtain the distribution
at the next iteration.
upper_bound (np.array): An array of upper bounds
lower_bound (np.array): An array of lower bounds
epsilon (float): A minimum variance. If the maximum variance drops below epsilon, optimization is
stopped.
alpha (float): Controls how much of the previous mean and variance is used for the next iteration.
next_mean = alpha * old_mean + (1 - alpha) * elite_mean, and similarly for variance.
"""
super().__init__()
self.sol_dim, self.max_iters, self.popsize, self.num_elites = sol_dim, max_iters, popsize, num_elites
self.ub, self.lb = upper_bound, lower_bound
self.epsilon, self.alpha = epsilon, alpha
self.cost_function = cost_function
if viz_dir is not None:
self.writer = tensorboardX.SummaryWriter(viz_dir)
else:
self.writer = tensorboardX.SummaryWriter()
if num_elites > popsize:
raise ValueError("Number of elites must be at most the population size.")
def reset(self):
pass
def obtain_solution(self, init_mean, init_var):
"""Optimizes the cost function using the provided initial candidate distribution
Arguments:
init_mean (np.ndarray): The mean of the initial candidate distribution.
init_var (np.ndarray): The variance of the initial candidate distribution.
"""
mean, var, t = init_mean, init_var, 0
X = stats.truncnorm(-2, 2, loc=np.zeros_like(mean), scale=np.ones_like(var))
costs_hist = []
mean_hist = []
var_hist = []
while (t < self.max_iters) and np.max(var) > self.epsilon:
lb_dist, ub_dist = mean - self.lb, self.ub - mean
constrained_var = np.minimum(np.minimum(np.square(lb_dist / 2), np.square(ub_dist / 2)), var)
samples = X.rvs(size=[self.popsize, self.sol_dim]) * np.sqrt(constrained_var) + mean
samples = samples.astype(np.float32)
costs = self.cost_function(samples, t)
elites = samples[np.argsort(costs)][:self.num_elites]
new_mean = np.mean(elites, axis=0)
new_var = np.var(elites, axis=0)
mean = self.alpha * mean + (1 - self.alpha) * new_mean
var = self.alpha * var + (1 - self.alpha) * new_var
for i, m in enumerate(mean):
self.writer.add_scalar(f'mean/{i}', m, t)
for i, m in enumerate(var):
self.writer.add_scalar(f'var/{i}', m, t)
self.writer.add_scalar('costs', np.min(costs), t)
t += 1
costs_hist.append(costs)
mean_hist.append(mean)
var_hist.append(var)
self.writer.close()
return dict(
mean_hist=mean_hist, costs_hist=costs_hist, var_hist=var_hist
)
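# A toy usage sketch of CEMOptimizer (kept separate from the PPO driver below and
# never invoked at import time): it minimises a simple quadratic, so the mean
# history returned by obtain_solution should drift towards `target`.
def _cem_toy_example():
    target = np.array([1.0, 2.0, 3.0], dtype=np.float32)

    def cost_fn(samples, t):
        # one cost per candidate row; lower is better
        return np.sum((samples - target) ** 2, axis=1)

    opt = CEMOptimizer(
        sol_dim=3,
        max_iters=20,
        popsize=64,
        num_elites=8,
        cost_function=cost_fn,
        lower_bound=np.zeros(3, dtype=np.float32),
        upper_bound=np.full(3, 5.0, dtype=np.float32),
    )
    return opt.obtain_solution(
        init_mean=np.full(3, 2.5, dtype=np.float32),
        init_var=np.full(3, 1.0, dtype=np.float32),
    )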
class PPO_Pytorch(object):
def __init__(self, experiment_name, env_params, train_params, **kwargs):
self.experiment_name = experiment_name
self.env_params = env_params
self.train_params = train_params
self.log_dir = osp.join('runs',
f'seed_{str(train_params["seed"])}_{datetime.now().strftime("%b%d_%H-%M-%S")}')
os.makedirs(self.log_dir, exist_ok=True)
with open(osp.join(self.log_dir, 'env_params.pkl'), 'wb+') as f:
pickle.dump(env_params, f)
with open(osp.join(self.log_dir, 'train_params.pkl'), 'wb+') as f:
pickle.dump(train_params, f)
super().__init__()
def train(self, env_id, backend,
train_params, env_params,
means, stdevs):
# Unpack params
hid_size = train_params['hid_size']
pol_init_std = train_params['pol_init_std']
adam_epsilon = train_params['adam_epsilon']
optim_stepsize = train_params['optim_stepsize']
# Convert means and stdevs to dict format
assert len(means) == len(stdevs), (len(means), len(stdevs))
mean_dict, stdev_dict = PPO_Pytorch._vec_to_dict(env_id, means, stdevs)
# Set parameter of env
self.env_dist.default_parameters = mean_dict
self.env_dist.stdev_dict = stdev_dict
env = self.env_dist.root_env
set_torch_num_threads()
# Construct policy and value network
pol = Policy(env.observation_space, env.action_space, hid_size, pol_init_std)
pol_optim = optim.Adam(pol.parameters(), lr=optim_stepsize, eps=adam_epsilon)
val = ValueNet(env.observation_space, hid_size)
val_optim = optim.Adam(val.parameters(), lr=optim_stepsize, eps=adam_epsilon)
optims = {'pol_optim': pol_optim, 'val_optim': val_optim}
num_train_iter = int(train_params['num_timesteps'] / train_params['ts_per_batch'])
# Buffer for running statistics
eps_rets_buff = deque(maxlen=100)
eps_rets_mean_buff = []
state_running_m_std = RunningMeanStd(shape=env.observation_space.shape)
# seg_gen is a generator that yields the training data points
seg_gen = traj_seg_gen(self.env_dist, pol, val, state_running_m_std, env_params, train_params)
eval_perfs = []
for iter_i in range(num_train_iter):
one_train_iter(pol, val, optims,
iter_i, eps_rets_buff, eps_rets_mean_buff, seg_gen,
state_running_m_std, train_params, self.eval_envs, eval_perfs)
return eval_perfs
def run(self):
set_global_seeds(self.train_params['seed'])
# Unpack params
env_name = self.env_params['env_name']
backend = self.env_params['backend']
stdev = self.train_params['env_dist_stdev']
mean_scale = self.train_params['mean_scale']
seed = self.train_params['seed']
num_eval_env = self.train_params['num_eval_env']
collision_detector = self.env_params['collision_detector']
# Obtain the initial value for the simulation parameters
env_dist = dr.dist.Normal(env_name, backend, mean_scale=mean_scale)
init_mean_param = PPO_Pytorch._dict_to_vec(env_name, env_dist.default_parameters)
init_stdev_param = np.array([stdev] * len(init_mean_param), dtype=np.float32)
cem_init_mean = np.concatenate((init_mean_param, init_stdev_param))
cem_init_stdev = np.array([1.0] * len(cem_init_mean), dtype=np.float32)
# Make envs that will be reused for training and eval
self.env_dist = dr.dist.Normal(env_name, backend)
self.env_dist.backend.set_collision_detector(env_dist.root_env, collision_detector)
self.env_dist.seed(seed)
if env_name == 'Walker':
self.eval_envs = [gym.make('Walker2d-v2') for _ in range(num_eval_env)]
elif env_name == 'Hopper':
self.eval_envs = [gym.make('Hopper-v2') for _ in range(num_eval_env)]
else:
exit('Unrecognized environment')
if COMM.Get_rank() == 0:
self.optimizer = CEMOptimizer(
sol_dim=30,
max_iters=300,
popsize=self.train_params['pop_size'],
num_elites=self.train_params['num_elites'],
cost_function=self._cost_function,
lower_bound=0.0,
# TODO: setting the upper bound this way, means that
# if the initial dimension value is 0, then the upper bound is 0
upper_bound=cem_init_mean * 5.0,
alpha=0.75,
viz_dir=self.log_dir
)
# This is buggy
# https://github.com/lanpa/tensorboardX/issues/345
self.optimizer.writer.add_text('env_params', str(self.env_params), 0)
self.optimizer.writer.add_text('train_params', str(self.train_params), 0)
res = self.optimizer.obtain_solution(cem_init_mean, cem_init_stdev)
path = osp.join(self.log_dir, 'res.pkl')
with open(path, 'wb') as f:
pickle.dump(res, f)
COMM.Abort(0)
else:
while True:
args = COMM.recv(source=0)
r = self.train(*args)
COMM.send(r, dest=0)
def _cost_function(self, samples, cem_timestep):
print(f'cem_timestep: {cem_timestep}')
env_name = self.env_params['env_name']
backend = self.env_params['backend']
pop_size = self.train_params['pop_size']
argss = [(env_name, backend,
self.train_params, self.env_params,
samples[rank][:len(samples[rank]) // 2],
samples[rank][len(samples[rank]) // 2:]) for rank in range(len(samples))]
# Send args to other MPI processes
for rank in range(1, COMM.size):
COMM.send(argss[rank], dest=rank)
# Obtain results for all args
r = self.train(*argss[0])
reses = [(0, r)] # 0 is the rank of this process
# Receive results from the other processes:
for rank in range(1, COMM.size):
r = COMM.recv(source=rank)
reses.append((rank, r))
reses = sorted(reses, key=lambda k: k[0])
print(reses)
# Get the index of the highest performing model in population
# and write result to tensorboard
max_idx = 0
max_perf = max(reses[0][1]) # 0 is the result of process rank 0. 1 brings us the eval perf list
for i, item in enumerate(reses):
perf = max(item[1])
if perf > max_perf:
max_perf = perf
max_idx = i
# Obtain the "costs" that the CEM cost function should return
costs = [- max(i[1]) for i in reses]
print(costs)
print(min(costs))
print()
return costs
@classmethod
def _dict_to_vec(cls, env_id, d):
return np.concatenate((
d['mass'],
d['damping'],
[d['gravity']]
)).flatten().copy()
@classmethod
def _vec_to_dict(cls, env_id, means, stdevs):
if env_id == 'Walker':
return dict(
mass=means[:7],
damping=means[7:-1],
gravity=means[-1]
), dict(
mass=stdevs[:7],
damping=stdevs[7:-1],
gravity=stdevs[-1]
)
elif env_id == 'Hopper':
return dict(
mass=means[:4],
damping=means[4:-1],
gravity=means[-1]
), dict(
mass=stdevs[:4],
damping=stdevs[4:-1],
gravity=stdevs[-1]
)
else:
exit('Unrecognized environment')
| 35.169096 | 111 | 0.603167 | [
"MIT"
] | quanvuong/domain_randomization | dr/experiment/ppo_pytorch.py | 12,063 | Python |
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
project = 'Domain Trails'
copyright = '2021, iAbdullahMughal'
author = 'iAbdullahMughal'
release = '1.0.0'
extensions = [
'sphinx.ext.duration',
'sphinx.ext.doctest',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'sphinx': ('https://www.sphinx-doc.org/en/master/', None),
}
intersphinx_disabled_domains = ['std']
autosummary_generate = True
templates_path = ['_templates']
exclude_patterns = []
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
source_suffix = ['.rst', '.md']
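# A typical local build of these docs, run from the repository root (a sketch,
# assuming sphinx and sphinx_rtd_theme are installed):
#   sphinx-build -b html docs/source docs/build/html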
| 21.030303 | 62 | 0.675793 | [
"MIT"
] | iAbdullahMughal/domain-trails | docs/source/conf.py | 694 | Python |
"""Api calls for sync."""
import asyncio
import logging
from aiohttp import ClientResponseError
from august.api_common import (
API_LOCK_URL,
API_RETRY_ATTEMPTS,
API_RETRY_TIME,
API_UNLOCK_URL,
HEADER_AUGUST_ACCESS_TOKEN,
ApiCommon,
_api_headers,
_convert_lock_result_to_activities,
_process_activity_json,
_process_doorbells_json,
_process_locks_json,
)
from august.doorbell import DoorbellDetail
from august.exceptions import AugustApiAIOHTTPError
from august.lock import LockDetail, determine_door_state, determine_lock_status
from august.pin import Pin
_LOGGER = logging.getLogger(__name__)
class ApiAsync(ApiCommon):
def __init__(self, aiohttp_session, timeout=10, command_timeout=60):
self._timeout = timeout
self._command_timeout = command_timeout
self._aiohttp_session = aiohttp_session
async def async_get_session(self, install_id, identifier, password):
return await self._async_dict_to_api(
self._build_get_session_request(install_id, identifier, password)
)
async def async_send_verification_code(self, access_token, login_method, username):
return await self._async_dict_to_api(
self._build_send_verification_code_request(
access_token, login_method, username
)
)
async def async_validate_verification_code(
self, access_token, login_method, username, verification_code
):
return await self._async_dict_to_api(
self._build_validate_verification_code_request(
access_token, login_method, username, verification_code
)
)
async def async_get_doorbells(self, access_token):
response = await self._async_dict_to_api(
self._build_get_doorbells_request(access_token)
)
return _process_doorbells_json(await response.json())
async def async_get_doorbell_detail(self, access_token, doorbell_id):
response = await self._async_dict_to_api(
self._build_get_doorbell_detail_request(access_token, doorbell_id)
)
return DoorbellDetail(await response.json())
async def async_wakeup_doorbell(self, access_token, doorbell_id):
await self._async_dict_to_api(
self._build_wakeup_doorbell_request(access_token, doorbell_id)
)
return True
async def async_get_houses(self, access_token):
return await self._async_dict_to_api(
self._build_get_houses_request(access_token)
)
async def async_get_house(self, access_token, house_id):
response = await self._async_dict_to_api(
self._build_get_house_request(access_token, house_id)
)
return await response.json()
async def async_get_house_activities(self, access_token, house_id, limit=8):
response = await self._async_dict_to_api(
self._build_get_house_activities_request(
access_token, house_id, limit=limit
)
)
return _process_activity_json(await response.json())
async def async_get_locks(self, access_token):
response = await self._async_dict_to_api(
self._build_get_locks_request(access_token)
)
return _process_locks_json(await response.json())
async def async_get_operable_locks(self, access_token):
locks = await self.async_get_locks(access_token)
return [lock for lock in locks if lock.is_operable]
async def async_get_lock_detail(self, access_token, lock_id):
response = await self._async_dict_to_api(
self._build_get_lock_detail_request(access_token, lock_id)
)
return LockDetail(await response.json())
async def async_get_lock_status(self, access_token, lock_id, door_status=False):
response = await self._async_dict_to_api(
self._build_get_lock_status_request(access_token, lock_id)
)
json_dict = await response.json()
if door_status:
return (
determine_lock_status(json_dict.get("status")),
determine_door_state(json_dict.get("doorState")),
)
return determine_lock_status(json_dict.get("status"))
async def async_get_lock_door_status(
self, access_token, lock_id, lock_status=False
):
response = await self._async_dict_to_api(
self._build_get_lock_status_request(access_token, lock_id)
)
json_dict = await response.json()
if lock_status:
return (
determine_door_state(json_dict.get("doorState")),
determine_lock_status(json_dict.get("status")),
)
return determine_door_state(json_dict.get("doorState"))
async def async_get_pins(self, access_token, lock_id):
response = await self._async_dict_to_api(
self._build_get_pins_request(access_token, lock_id)
)
json_dict = await response.json()
return [Pin(pin_json) for pin_json in json_dict.get("loaded", [])]
async def _async_call_lock_operation(self, url_str, access_token, lock_id):
response = await self._async_dict_to_api(
self._build_call_lock_operation_request(
url_str, access_token, lock_id, self._command_timeout
)
)
return await response.json()
async def _async_lock(self, access_token, lock_id):
return await self._async_call_lock_operation(
API_LOCK_URL, access_token, lock_id
)
async def async_lock(self, access_token, lock_id):
"""Execute a remote lock operation.
Returns a LockStatus state.
"""
return determine_lock_status(
(await self._async_lock(access_token, lock_id)).get("status")
)
async def async_lock_return_activities(self, access_token, lock_id):
"""Execute a remote lock operation.
Returns an array of one or more august.activity.Activity objects
If the lock supports door sense one of the activities
will include the current door state.
"""
return _convert_lock_result_to_activities(
await self._async_lock(access_token, lock_id)
)
async def _async_unlock(self, access_token, lock_id):
return await self._async_call_lock_operation(
API_UNLOCK_URL, access_token, lock_id
)
async def async_unlock(self, access_token, lock_id):
"""Execute a remote unlock operation.
Returns a LockStatus state.
"""
return determine_lock_status(
(await self._async_unlock(access_token, lock_id)).get("status")
)
async def async_unlock_return_activities(self, access_token, lock_id):
"""Execute a remote lock operation.
Returns an array of one or more august.activity.Activity objects
If the lock supports door sense one of the activities
will include the current door state.
"""
return _convert_lock_result_to_activities(
await self._async_unlock(access_token, lock_id)
)
async def async_refresh_access_token(self, access_token):
"""Obtain a new api token."""
return (
await self._async_dict_to_api(
self._build_refresh_access_token_request(access_token)
)
).headers[HEADER_AUGUST_ACCESS_TOKEN]
async def _async_dict_to_api(self, api_dict):
url = api_dict["url"]
method = api_dict["method"]
access_token = api_dict.get("access_token", None)
del api_dict["url"]
del api_dict["method"]
if access_token:
del api_dict["access_token"]
payload = api_dict.get("params") or api_dict.get("json")
if "headers" not in api_dict:
api_dict["headers"] = _api_headers(access_token=access_token)
if "timeout" not in api_dict:
api_dict["timeout"] = self._timeout
_LOGGER.debug(
"About to call %s with header=%s and payload=%s",
url,
api_dict["headers"],
payload,
)
attempts = 0
while attempts < API_RETRY_ATTEMPTS:
attempts += 1
response = await self._aiohttp_session.request(method, url, **api_dict)
_LOGGER.debug(
"Received API response: %s, %s", response.status, await response.read()
)
if response.status == 429:
_LOGGER.debug(
"August sent a 429 (attempt: %d), sleeping and trying again",
attempts,
)
await asyncio.sleep(API_RETRY_TIME)
continue
break
_raise_response_exceptions(response)
return response
def _raise_response_exceptions(response):
try:
response.raise_for_status()
except ClientResponseError as err:
if err.status == 422:
raise AugustApiAIOHTTPError(
"The operation failed because the bridge (connect) is offline.",
) from err
if err.status == 423:
raise AugustApiAIOHTTPError(
"The operation failed because the bridge (connect) is in use.",
) from err
if err.status == 408:
raise AugustApiAIOHTTPError(
"The operation timed out because the bridge (connect) failed to respond.",
) from err
raise err
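# A minimal usage sketch (token acquisition is out of scope here; the access_token
# argument is assumed to come from the authenticator flow elsewhere in this package):
async def _example_list_operable_locks(access_token):
    import aiohttp

    async with aiohttp.ClientSession() as session:
        api = ApiAsync(session)
        return await api.async_get_operable_locks(access_token)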
| 34.480144 | 90 | 0.653335 | [
"MIT"
] | THATDONFC/py-august | august/api_async.py | 9,551 | Python |
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BeautifulSoup import BeautifulSoup
import urllib2
import xmltodict
import json
import Queue
from threading import Thread
from collections import OrderedDict
import itertools
from ascii_graph import Pyasciigraph
import sys
import argparse
import os
# default build that is used against apache hive precommit test report
REPORTS_DIR = "/tmp/slow-test-reports"
BUILD_NUMBER = 830
TOP_K = 25
json_dumps = []
# parallel xml report downloader
class ReportDownloader(Thread):
def __init__(self, q):
Thread.__init__(self)
self.q = q
def run(self):
while True:
# Get the work from the queue and expand the tuple
link = self.q.get()
xmlFile = urllib2.urlopen(link)
xmlData = xmlFile.read()
xmlSoup = BeautifulSoup(xmlData)
d = xmltodict.parse(xmlData, xml_attribs=True)
d['testsuite'].pop('properties', None)
json_dumps.append(d)
self.q.task_done()
def get_links(rootUrl):
html_page = urllib2.urlopen(rootUrl)
soup = BeautifulSoup(html_page)
result = []
for link in soup.findAll('a'):
hrefs = link.get('href')
if hrefs.endswith('.xml'):
result.append(rootUrl + "/" + hrefs)
return result
def take(iterable, n=TOP_K):
return list(itertools.islice(iterable, 0, n))
def plot_testsuite_time(json_data, top_k=TOP_K, ascii_graph=False, report_file=None):
suite_time = {}
overall_time = 0.0
for suite in json_data:
name = suite['testsuite']['@name'].rsplit(".",1)[-1]
time = float(suite['testsuite']['@time'].replace(',',''))
overall_time += time
if name in suite_time:
total_time = suite_time[name]
suite_time[name] = total_time + time
else:
suite_time[name] = time
d_descending = OrderedDict(sorted(suite_time.items(),
key=lambda kv: kv[1], reverse=True))
gdata = []
for k,v in take(d_descending.iteritems(), top_k):
gdata.append((k, v))
print '\nTop ' + str(top_k) + ' testsuite in terms of execution time (in seconds).. [Total time: ' + str(overall_time) + ' seconds]'
if ascii_graph:
graph = Pyasciigraph()
for line in graph.graph('', gdata):
print line
else:
for line in gdata:
print line[0] + "\t" + str(line[1])
if report_file != None:
with open(report_file, "w") as f:
f.write('Top ' + str(top_k) + ' testsuite in terms of execution time (in seconds).. [Total time: ' + str(overall_time) + ' seconds]\n')
for line in gdata:
f.write(line[0] + "\t" + str(line[1]) + "\n")
def plot_testcase_time(json_data, top_k=TOP_K, ascii_graph=False, report_file=None):
testcase_time = {}
overall_time = 0.0
for suite in json_data:
if int(suite['testsuite']['@tests']) > 0:
for t in suite['testsuite']['testcase']:
if isinstance(t, dict):
name = t['@classname'].rsplit(".",1)[-1] + "_" + t['@name']
time = float(t['@time'].replace(',',''))
overall_time += time
if name in testcase_time:
total_time = testcase_time[name]
testcase_time[name] = total_time + time
else:
testcase_time[name] = time
if int(suite['testsuite']['@tests']) == 0:
print "Empty batch detected for testsuite: " + suite['testsuite']['@name'] + " which took " + suite['testsuite']['@time'] + "s"
d_descending = OrderedDict(sorted(testcase_time.items(),
key=lambda kv: kv[1], reverse=True))
gdata = []
for k,v in take(d_descending.iteritems(), top_k):
gdata.append((k, v))
print '\nTop ' + str(top_k) + ' testcases in terms of execution time (in seconds).. [Total time: ' + str(overall_time) + ' seconds]'
if ascii_graph:
graph = Pyasciigraph()
for line in graph.graph('', gdata):
print line
else:
for line in gdata:
print line[0] + "\t" + str(line[1])
if report_file != None:
with open(report_file, "a") as f:
f.write('\nTop ' + str(top_k) + ' testcases in terms of execution time (in seconds).. [Total time: ' + str(overall_time) + ' seconds]\n')
for line in gdata:
f.write(line[0] + "\t" + str(line[1]) + "\n")
def get_latest_build_with_report(build_number):
latest_report = BUILD_NUMBER
if not os.path.exists(REPORTS_DIR):
os.makedirs(REPORTS_DIR)
for i in os.listdir(REPORTS_DIR):
if i.endswith(".txt"):
current_report = int(i.split(".txt")[0])
if current_report > latest_report:
latest_report = current_report
return latest_report
def get_pending_report_list(last_report, precommit_url):
next_report = last_report
pending_reports = []
done = False
while done == False:
try:
urllib2.urlopen(precommit_url % next_report)
pending_reports.append(next_report)
next_report += 1
except urllib2.HTTPError, e:
done = True
return pending_reports
def print_report(reportUrl, json_dump, top_k, ascii_graph, report_file=None):
links = get_links(reportUrl)
# Create a queue to communicate with the worker threads
q = Queue.Queue()
print "\nProcessing " + str(len(links)) + " test xml reports from " + reportUrl + ".."
# Create 8 worker threads
for x in range(8):
worker = ReportDownloader(q)
# Setting daemon to True will let the main thread exit even though the workers are blocking
worker.daemon = True
worker.start()
# Put the tasks into the queue as a tuple
for link in links:
q.put(link)
# Causes the main thread to wait for the queue to finish processing all the tasks
q.join()
# dump test reports in json format
if json_dump:
with open('data.json', 'w') as outfile:
json.dump(json_dumps, outfile, indent = 2)
# print or plot top-k tests on console
plot_testsuite_time(json_dumps, top_k, ascii_graph, report_file)
plot_testcase_time(json_dumps, top_k, ascii_graph, report_file)
del json_dumps[:]
def main():
parser = argparse.ArgumentParser(description='Program to print top-k test report for Apache Hive precommit tests')
parser.add_argument('-b', action='store', dest='build_number', help='build number of the test run. default uses test reports from apache hive precommit test run.')
parser.add_argument('-u', action='store', dest='report_url', help='url for the test report')
parser.add_argument('-j', action='store_true', default=False, dest='json_dump', help='json dump of test reports')
parser.add_argument('-k', action='store', dest='top_k', type=int, help='print top k testsuite and testcases to console')
parser.add_argument('-a', action='store_true', default=False, dest='ascii_graph', help='ascii output of the report')
parser.add_argument('-l', action='store_true', default=False, dest='latest_report', help='will generate all missing reports up until latest build number')
args = parser.parse_args()
precommit_url = "http://104.198.109.242/logs/PreCommit-HIVE-Build-%s/test-results/"
last_report = get_latest_build_with_report(BUILD_NUMBER)
pending_reports = get_pending_report_list(last_report, precommit_url)
build = last_report
if args.build_number != None:
build = args.build_number
reportUrl = precommit_url % build
if args.report_url != None:
reportUrl = args.report_url
json_dump = args.json_dump
top_k = TOP_K
if args.top_k != None:
top_k = args.top_k
ascii_graph = args.ascii_graph
print_report(reportUrl, json_dump, top_k, ascii_graph, os.path.join(REPORTS_DIR, str(build) + ".txt"))
if args.latest_report:
for l in pending_reports:
reportUrl = precommit_url % l
print_report(reportUrl, json_dump, top_k, ascii_graph, os.path.join(REPORTS_DIR, str(l) + ".txt"))
main()
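# Example invocations (the build number, url and top-k shown are arbitrary):
#   python gen-report.py -b 830 -k 10 -a
#   python gen-report.py -u http://<host>/test-results/ -j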
| 33.970954 | 164 | 0.703188 | [
"Apache-2.0"
] | 10088/hive | testutils/gen-report.py | 8,187 | Python |
from terminusdb_client.woqlclient.api_endpoint_const import APIEndpointConst
from .connectCapabilitiesResponse import ConnectResponse
from .getSchemaTurtleResponse import RESPONSE
def mocked_requests(*args, **kwargs):
class MockResponse:
def json(self):
if self._json_data is None:
raise ValueError("EXCEPTION NO JSON OBJECT")
return self._json_data
@property
def status_code(self):
return self._status_code
@property
def url(self):
return self._url
@property
def text(self):
return self._text
def __init__(self, url, status, action_type):
# set status code and content
self._json_data = None
self._text = None
self._status_code = status
self._content = "cont"
self._url = url
# add json data if provided
if action_type == APIEndpointConst.CONNECT:
self._json_data = ConnectResponse
elif action_type == APIEndpointConst.GET_TRIPLES:
self._text = RESPONSE
# elif action_type == APIEndpointConst.WOQL_SELECT:
# with open("tests/getAllClassQueryResponse.json") as json_file:
# json_data = json.load(json_file)
# self._json_data = json_data
# json_file.close()
elif (
action_type == APIEndpointConst.CREATE_DATABASE
or action_type == APIEndpointConst.DELETE_DATABASE
or action_type == APIEndpointConst.UPDATE_TRIPLES
or action_type == APIEndpointConst.BRANCH
or action_type == APIEndpointConst.CREATE_GRAPH
):
self._json_data = {"terminus:status": "terminus:success"}
if (
args[0]
== "http://localhost:6363/branch/admin/myDBName/local/branch/my_new_branch"
):
return MockResponse(args[0], 200, APIEndpointConst.BRANCH)
elif (
args[0]
== "http://localhost:6363/graph/admin/myDBName/local/branch/master/instance/mygraph"
):
return MockResponse(args[0], 200, APIEndpointConst.CREATE_GRAPH)
elif (
args[0]
== "http://localhost:6363/triples/admin/myDBName/local/branch/master/instance/mygraph"
):
return MockResponse(args[0], 200, APIEndpointConst.GET_TRIPLES)
elif args[0] == "http://localhost:6363/db/admin/myFirstTerminusDB":
return MockResponse(args[0], 200, APIEndpointConst.DELETE_DATABASE)
return MockResponse(args[0], 200, APIEndpointConst.CONNECT)
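# Illustrative usage sketch (assumed test wiring; the actual test modules may patch differently):
#   from unittest import mock
#   @mock.patch("requests.get", side_effect=mocked_requests)
#   def test_connect(mocked_get):
#       ...  # any code that calls requests.get now receives the canned MockResponse objects above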
| 33.607595 | 94 | 0.616196 | [
"Apache-2.0"
] | Morijarti/terminusdb-client-python | terminusdb_client/tests/mockResponse.py | 2,655 | Python |
def main():
with open('inputs/01.in') as f:
data = [int(line) for line in f]
print(sum(data))
result = 0
seen = {0}
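    # Part 2: find the first running total that repeats, replaying the list as needed.
    # Worked example (from the puzzle statement): for the changes [+1, -2, +3, +1] the
    # running totals are 1, -1, 2, 3, then 4, 2, ... so the first frequency reached twice is 2.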
while True:
for item in data:
result += item
if result in seen:
print(result)
return
seen.add(result)
if __name__ == '__main__':
main()
| 18.2 | 40 | 0.46978 | [
"MIT"
] | Wattleninja/Advent-of-Code | 2018/day_01.py | 364 | Python |
################################################################################
#
# Author: Diego Montufar
# Date: Apr/2015
# Name: __init__.py
# Description: Here we define the available web services which can be accessed by
# following the app.route path as URL. The indexer module will perform the hard work
# as it communicates directly with the elasticsearch and couchdb instances through RESTful calls.
# Although Flask apps allow us to perform more complex tasks, we are only using basic calls
# to the corresponding indexer module method and returning a JSON response to the web interface.
#
# Dependencies: Flask -> Provides you with tools, libraries and technologies that allow you to build a web application
# indexer -> For communicating with elasticsearch and couchdb
#
################################################################################
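#
# Example request (illustrative; host and port depend on how the Flask app is deployed):
#   GET http://localhost:5000/sentimentTotals/AFL/206041122/1428069500339/1430578700339
# returns the sentiment totals for the term "AFL" in suburb 206041122 over the given timestamp range.
#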
from services import indexer #indexer module
from datetime import timedelta #datetime tools
from flask import make_response, request, current_app, jsonify, Flask , render_template #flask crossdomain tools
from functools import update_wrapper #python wrappers
app = Flask(__name__)
#Python version workaround
try:
unicode = unicode
except NameError:
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str,bytes)
else:
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
#Define a wrapper for supporting crossdomain calls
def crossdomain(origin=None, methods=None, headers=None, max_age=21600, attach_to_all=True, automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
h['Access-Control-Allow-Headers'] = \
"Origin, X-Requested-With, Content-Type, Accept, Authorization"
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
#Service: /
#Description: Main web page index.html is called here. i.e http://{localhost}/ or http://{localhost}/index
#Parameters: none
#output: index.html
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html')
#Service: genericSearch
#Description: Perform custom searches based on elasticsearch queries in json format
#Parameters: <jsonQuery> (String) is a json based tring, which must follow the elasticsearch query structure
#output: a json object containing the matched results
@app.route('/genericSearch/<jsonQuery>',methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def getGenericSearch(jsonQuery):
return jsonify(indexer.genericSearch(jsonQuery))
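# Illustrative jsonQuery (an assumed minimal Elasticsearch query body, passed URL-encoded in the path):
#   {"query": {"match_all": {}}, "size": 10}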
#Service: genericGeoSearch
#Description: Perform custom geo searches based on elasticsearch queries in json format
#Parameters: <term> (String) -> Text you want to search for. i.e. AFL or Tony Abbott or *
# <suburbCode> (String) -> Suburb code. i.e. 206041122
# <startP> (Int) -> Pagination support. i.e. 0 if you want all the results, or 15 if you want to skip the first 15 results
# <sizeP> (Int) -> Pagination support. i.e 50 if you want up to 50 resutls by taking account of startP parameter
# <startDate> (Int) -> Timestamp start date. i.e 1428069500339
# <endDate> (Int) -> Timestamp end date i.e 1430578700339
#output: a json object containing the matched results
@app.route('/genericGeoSearch/<term>/<suburbCode>/<startP>/<sizeP>/<startDate>/<endDate>',methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def getGenericGeoSearch(term,suburbCode,startP,sizeP,startDate,endDate):
return jsonify(indexer.getTweetsBySuburb(term,suburbCode,startP,sizeP,startDate,endDate))
#Service: sentimentTotals
#Description: Search for sentiment totals by terms, suburb and date range
#Parameters: <term> (String) -> Text you want to search for. i.e. AFL or Tony Abbott or *
# <suburbCode> (String) -> Suburb code. i.e. 206041122
# <startDate> (Int) -> Timestamp start date. i.e 1428069500339
# <endDate> (Int) -> Timestamp end date i.e 1430578700339
#output: a json object containing the matched results
@app.route('/sentimentTotals/<term>/<suburbCode>/<startDate>/<endDate>',methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def getSentimentTotals(term,suburbCode,startDate,endDate):
return jsonify(indexer.statisticsByTerm(term,suburbCode,startDate,endDate))
#Service: suburbsByCountry
#Description: Get a list of the suburbs of main cities of AU as defined on the ABS 2011 census database
#Parameters: <countryCode> (Int) -> Country code. In this case we only care about Autralia, that is countryCode = 1
#output: a json object containing the matched results
@app.route('/suburbsByCountry/<countryCode>',methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def getSuburbsList(countryCode):
return jsonify(indexer.getSuburbsList(countryCode))
#Service: culturesByState
#Description: Get a GeoJSON file containing information related to countries of birth by suburb of AU as defined on the ABS 2011 census database
#Parameters: <stateCode> (String) -> the code of the state: i.e: VIC, TAS, etc
#return: GeoJson object
@app.route('/culturesByState/<stateCode>',methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def getCulturesByState(stateCode):
return jsonify(indexer.getCulturesByState(stateCode))
#Service: languagesByCountry
#Description: Get a list of languages, each mapped to the corresponding country where it is spoken
#Parameters: <countryCode> (String) -> Country code. In this case we only care about Autralia, that is countryCode = 1
#return: a json object containing the matched results
@app.route('/languagesByCountry/<countryCode>',methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def getLanguagesByCountry(countryCode):
return jsonify(indexer.getLanguages(countryCode))
#Service: tweetsByCountryOfBirth
#Description: Get the count of tweets grouped by language as defined on the languages database
#Parameters: <term> (String) -> Text you want to search for. i.e. AFL or Tony Abbott or *
# <stateCode> (String) -> the code of the state: i.e: VIC, TAS, etc
# <suburbCode> (String) -> Suburb code. i.e. 206041122
# <startDate> (Int) -> Timestamp start date. i.e 1428069500339
# <endDate> (Int) -> Timestamp end date i.e 1430578700339
#output: a json object containing the matched results
@app.route('/tweetsByCountryOfBirth/<term>/<stateCode>/<suburbCode>/<startDate>/<endDate>',methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def getTweetsByCountryOfBirth(term,stateCode,suburbCode,startDate,endDate):
return jsonify(indexer.getTweetsByCountryOfBirth(term,stateCode,suburbCode,startDate,endDate))
#Service: topListBySuburb
#Description: List the top-N counts of a particular field within a date range, by suburb
#Parameters: <term> (String) -> Text you want to search for. i.e. AFL or Tony Abbott or *
# <suburbCode> (String) -> Suburb code. i.e. 206041122
# <field> (String) -> Twitter field to aggreagate i.e. user.screen_name
# <size> (Int) -> N i.e 5 for a Top five list
# <startDate> (Int) -> Timestamp start date. i.e 1428069500339
# <endDate> (Int) -> Timestamp end date i.e 1430578700339
#output: a json object containing the matched results
@app.route('/topListBySuburb/<term>/<suburbCode>/<field>/<size>/<startDate>/<endDate>',methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def getTopList(term,suburbCode,field,size,startDate,endDate):
return jsonify(indexer.getTopListBySuburb(term,suburbCode,field,size,startDate,endDate))
#Service: sentimentTotalsByCity
#Description: Search for the totals of sentiment analysis in all the cities of AU within a date range
#Parameters: <term> (String) -> Text you want to search for. i.e. AFL or Tony Abbott or *
# <startDate> (Int) -> Timestamp start date. i.e 1428069500339
# <endDate> (Int) -> Timestamp end date i.e 1430578700339
#output: a json object containing the matched results
@app.route('/sentimentTotalsByCity/<term>/<startDate>/<endDate>',methods=['GET', 'OPTIONS'])
@crossdomain(origin='*')
def getSentimentTotalsByCity(term,startDate,endDate):
return jsonify(indexer.getAllSentimentTotalsByCity(term,startDate,endDate))
#Service: topListByCity
#Description: Get the top N list by city within a date range
#Parameters: <field> (String) -> Twitter field to aggreagate i.e. user.screen_name
# <size> (Int) -> N i.e 5 for a Top five list
# <startDate> (Int) -> Timestamp start date. i.e 1428069500339
# <endDate> (Int) -> Timestamp end date i.e 1430578700339
#output: a json object containing the matched results
@app.route('/topListByCity/<field>/<size>/<startDate>/<endDate>',methods=['GET','OPTIONS'])
@crossdomain(origin='*')
def getAllTopListByCity(field,size,startDate, endDate):
return jsonify(indexer.getAllTopListsByCity(field,size,startDate, endDate))
#Service: cultureTotalsByCity
#Description: Get the count of languages found on the tweets by terms within a date range
#Parameters: <term> (String) -> Text you want to search for. i.e. AFL or Tony Abbott or *
# <stateCode> (String) -> the code of the state: i.e: VIC, TAS, etc
# <startDate> (Int) -> Timestamp start date. i.e 1428069500339
# <endDate> (Int) -> Timestamp end date i.e 1430578700339
#output: a json object containing the matched results
@app.route('/cultureTotalsByCity/<term>/<stateCode>/<startDate>/<endDate>',methods=['GET','OPTIONS'])
@crossdomain(origin='*')
def getAllLanguagesTotalsByCity(term, stateCode, startDate, endDate):
return jsonify(indexer.getAllLanguagesTotalsByCity(term, stateCode, startDate, endDate))
#Service: sentimentTotalsByCity
#Description: Get the total sentiment by City, by term within a date range.
#Disclaimer: This search takes a long time. Must be reviewed.
#Parameters: <term> (String) -> Text you want to search for. i.e. AFL or Tony Abbott or *
# <stateCode> (String) -> the code of the state: i.e: VIC, TAS, etc
# <startDate> (Int) -> Timestamp start date. i.e 1428069500339
# <endDate> (Int) -> Timestamp end date i.e 1430578700339
#output: a json object containing the matched results
@app.route('/sentimentTotalsByCity/<term>/<stateCode>/<startDate>/<endDate>',methods=['GET','OPTIONS'])
@crossdomain(origin='*')
def getAllSentimentByCity(term,stateCode, startDate, endDate):
return jsonify(indexer.getAllSentimentByCity(term,stateCode, startDate, endDate))
#Service: sentimentAnalysis
#Description: Perform sentiment Analysis using the tweet_classifier open source library
#Parameters: <text> (String) -> i.e. I'm happy to be here :)
#output: json results containing the sentiment analysis performed on the provided text
@app.route('/sentimentAnalysis/<text>',methods=['GET','OPTIONS'])
@crossdomain(origin='*')
def getSentimentAnalysis(text):
return jsonify(indexer.getSentimentAnalysis(text))
#Service: cultureSentimentBySuburb
#Description: Get the count of sentiment of languages found on the tweets by terms within a date range
#Parameters: <term> (String) -> Text you want to search for. i.e. AFL or Tony Abbott or *
# <suburbCode> (String) -> Suburb code. i.e. 206041122
# <startDate> (Int) -> Timestamp start date. i.e 1428069500339
# <endDate> (Int) -> Timestamp end date i.e 1430578700339
#output: a json object containing the matched results
@app.route('/cultureSentimentBySuburb/<term>/<suburbCode>/<startDate>/<endDate>',methods=['GET','OPTIONS'])
@crossdomain(origin='*')
def getLanguagesSentimentBySuburb(term, suburbCode, startDate, endDate):
return jsonify(indexer.getLanguagesSentimentBySuburb(term, suburbCode, startDate, endDate))
#main
if __name__ == "__main__":
app.run(debug=True) | 53.181467 | 145 | 0.675476 | [
"MIT",
"Unlicense"
] | diogonal/SentimentAnalyser | web/__init__.py | 13,774 | Python |
# reference_metric.py: Define all needed quantities
# for a reference metric.
# Given uniform (reference metric) coordinate
# (xx[0],xx[1],xx[2]), you must define:
# 1) xxmin[3],xxmax[3]: Valid ranges for each
# uniform coordinate xx0,xx1,xx2
# 2) xxSph[3]: Spherical coordinate (r,theta,phi),
# in terms of uniform coordinate xx0,xx1,xx2
# 3) xx_to_Cart[3]: Cartesian coordinate (x,y,z),
# in terms of uniform coordinate xx0,xx1,xx2
# 4) scalefactor_orthog:
# orthogonal coordinate scale factor
# (positive root of diagonal reference metric
# components)
# 5) Cart_to_xx[3]: Inverse of xx_to_Cart:
# xx0,xx1,xx2 as functions of (x,y,z).
# In the case that there exists no closed-form
# expression, then a root finder might be needed
# 6) UnitVectors[3][3]: Unit vectors of reference
# metric.
# Author: Zachariah B. Etienne
#         zachetie **at** gmail **dot** com
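#
# Illustrative usage sketch (a minimal example, assuming the standard NRPy+ workflow in which
# the coordinate system is chosen through the parameter interface before calling reference_metric()):
#   import NRPy_param_funcs as par
#   import reference_metric as rfm
#   par.set_parval_from_str("reference_metric::CoordSystem", "Spherical")
#   rfm.reference_metric()
#   # rfm.ghatDD, rfm.ReU, rfm.GammahatUDD, etc. are now populated for Spherical coordinates.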
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
from outputC import outputC, superfast_uniq, add_to_Cfunction_dict, indent_Ccode # NRPy+: Core C code output module
# VVVVVVVVVVVVVVVVV
## TO BE DEPRECATED
from outputC import outC_function_dict
# ^^^^^^^^^^^^^^^^^
from outputC import outC_NRPy_basic_defines_h_dict
import NRPy_param_funcs as par # NRPy+: Parameter interface
import grid as gri # NRPy+: Functions having to do with numerical grids
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import os, sys # Standard Python modules for multiplatform OS-level functions
# Step 0a: Initialize parameters
thismodule = __name__
par.initialize_param(par.glb_param("char", thismodule, "CoordSystem", "Spherical"))
par.initialize_param(par.glb_param("char", thismodule, "enable_rfm_precompute", "False"))
par.initialize_param(par.glb_param("char", thismodule, "rfm_precompute_to_Cfunctions_and_NRPy_basic_defines", "False"))
par.initialize_param(par.glb_param("char", thismodule, "rfm_precompute_Ccode_outdir", "Ccode"))
# Step 0b: Declare global variables
xx = gri.xx
xx_to_Cart = ixp.zerorank1(DIM=4) # Must be set as a function of (xx[0],xx[1],xx[2])
Cart_to_xx = ixp.zerorank1(DIM=4) # Must be set as a function of (xx[0],xx[1],xx[2])
xxSph = ixp.zerorank1(DIM=4) # Must be set as a function of (xx[0],xx[1],xx[2])
scalefactor_orthog = ixp.zerorank1(DIM=4) # Must be set as a function of (xx[0],xx[1],xx[2])
Cartx, Carty, Cartz = sp.symbols("Cartx Carty Cartz", real=True)
Cart = [Cartx, Carty, Cartz]
scalefactor_orthog_funcform = ixp.zerorank1(DIM=4) # Must be set in terms of generic functions of xx[]s
# The following are necessary since SymPy has trouble with its native sinh and cosh functions.
def nrpysinh(x):
return (sp.exp(x) - sp.exp(-x)) * sp.Rational(1, 2)
def nrpycosh(x):
return (sp.exp(x) + sp.exp(-x)) * sp.Rational(1, 2)
have_already_called_reference_metric_function = False
def reference_metric(SymPySimplifyExpressions=True): #, enable_compute_hatted_quantities=True):
global f0_of_xx0_funcform, f1_of_xx1_funcform, f2_of_xx0_xx1_funcform, f3_of_xx0_funcform, f4_of_xx2_funcform
global f0_of_xx0, f1_of_xx1, f2_of_xx1, f2_of_xx0_xx1, f3_of_xx0, f4_of_xx2
f0_of_xx0_funcform = sp.Function('f0_of_xx0_funcform')(xx[0])
f1_of_xx1_funcform = sp.Function('f1_of_xx1_funcform')(xx[1])
# f2_of_xx1_funcform = sp.Function('f2_of_xx1_funcform')(xx[1])
f2_of_xx0_xx1_funcform = sp.Function('f2_of_xx0_xx1_funcform')(xx[0], xx[1])
f3_of_xx0_funcform = sp.Function('f3_of_xx0_funcform')(xx[0])
f4_of_xx2_funcform = sp.Function('f4_of_xx2_funcform')(xx[2])
f0_of_xx0, f1_of_xx1, f2_of_xx1, f2_of_xx0_xx1, f3_of_xx0, f4_of_xx2 = par.Cparameters("REAL", thismodule,
["f0_of_xx0", "f1_of_xx1", "f2_of_xx1", "f2_of_xx0_xx1", "f3_of_xx0", "f4_of_xx2"], 1e300)
# FIXME: Hack
# return values of par.Cparameters() in the following code block are unused, so we ignore them.
par.Cparameters("REAL", thismodule, ["f0_of_xx0__D0", "f0_of_xx0__DD00","f0_of_xx0__DDD000"], 1e300)
par.Cparameters("REAL", thismodule, ["f1_of_xx1__D1", "f1_of_xx1__DD11","f1_of_xx1__DDD111"], 1e300)
par.Cparameters("REAL", thismodule, ["f2_of_xx1__D1", "f2_of_xx1__DD11","f2_of_xx1__DDD111"], 1e300)
par.Cparameters("REAL", thismodule,
["f2_of_xx0_xx1__D0", "f2_of_xx0_xx1__D1", "f2_of_xx0_xx1__DD00", "f2_of_xx0_xx1__DD11"], 1e300)
par.Cparameters("REAL", thismodule, ["f3_of_xx0__D0", "f3_of_xx0__DD00"], 1e300)
par.Cparameters("REAL", thismodule, ["f4_of_xx2__D2", "f4_of_xx2__DD22"], 1e300)
global have_already_called_reference_metric_function # setting to global enables other modules to see updated value.
have_already_called_reference_metric_function = True
CoordSystem = par.parval_from_str("reference_metric::CoordSystem")
M_PI, M_SQRT1_2 = par.Cparameters("#define", thismodule, ["M_PI", "M_SQRT1_2"], "")
global xxmin
global xxmax
global UnitVectors
UnitVectors = ixp.zerorank2(DIM=3)
# Set up hatted metric tensor, rescaling matrix, and rescaling vector
#####################################################################
# SPHERICAL-LIKE COORDINATE SYSTEMS WITH & WITHOUT RADIAL RESCALING #
#####################################################################
if CoordSystem in ('Spherical', 'SinhSpherical', 'SinhSphericalv2'):
# Adding assumption real=True can help simplify expressions involving xx[0] & xx[1] below.
xx[0] = sp.symbols("xx0", real=True)
xx[1] = sp.symbols("xx1", real=True)
if CoordSystem == "Spherical":
RMAX = par.Cparameters("REAL", thismodule, ["RMAX"],10.0)
xxmin = [sp.sympify(0), sp.sympify(0), -M_PI]
xxmax = [ RMAX, M_PI, M_PI]
r = xx[0]
th = xx[1]
ph = xx[2]
Cart_to_xx[0] = sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2)
Cart_to_xx[1] = sp.acos(Cartz / Cart_to_xx[0])
Cart_to_xx[2] = sp.atan2(Carty, Cartx)
elif CoordSystem == "SinhSpherical":
xxmin = [sp.sympify(0), sp.sympify(0), -M_PI]
xxmax = [sp.sympify(1), M_PI, M_PI]
AMPL, SINHW = par.Cparameters("REAL",thismodule,["AMPL","SINHW"],[10.0,0.2])
# Set SinhSpherical radial coordinate by default; overwrite later if CoordSystem == "SinhSphericalv2".
r = AMPL * (sp.exp(xx[0] / SINHW) - sp.exp(-xx[0] / SINHW)) / \
(sp.exp(1 / SINHW) - sp.exp(-1 / SINHW))
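            # By construction r(xx0=0) = 0 and r(xx0=1) = AMPL, so AMPL sets the outer boundary
            # radius while SINHW controls how strongly gridpoints cluster near r=0.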
th = xx[1]
ph = xx[2]
Cart_to_xx[0] = SINHW*sp.asinh(sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2)*sp.sinh(1/SINHW)/AMPL)
Cart_to_xx[1] = sp.acos(Cartz / sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2))
Cart_to_xx[2] = sp.atan2(Carty, Cartx)
# SinhSphericalv2 adds the parameter "const_dr", which allows for a region near xx[0]=0 to have
# constant radial resolution of const_dr, provided the sinh() term does not dominate near xx[0]=0.
elif CoordSystem == "SinhSphericalv2":
xxmin = [sp.sympify(0), sp.sympify(0), -M_PI]
xxmax = [sp.sympify(1), M_PI, M_PI]
AMPL, SINHW = par.Cparameters("REAL",thismodule,["AMPL","SINHW"],[10.0,0.2])
const_dr = par.Cparameters("REAL",thismodule,["const_dr"],0.0625)
r = AMPL*( const_dr*xx[0] + (sp.exp(xx[0] / SINHW) - sp.exp(-xx[0] / SINHW)) /
(sp.exp(1 / SINHW) - sp.exp(-1 / SINHW)) )
th = xx[1]
ph = xx[2]
# NO CLOSED-FORM EXPRESSION FOR RADIAL INVERSION.
# Cart_to_xx[0] = "NewtonRaphson"
# Cart_to_xx[1] = sp.acos(Cartz / sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2))
# Cart_to_xx[2] = sp.atan2(Carty, Cartx)
xxSph[0] = r
xxSph[1] = th
xxSph[2] = ph
# Now define xCart, yCart, and zCart in terms of x0,xx[1],xx[2].
# Note that the relation between r and x0 is not necessarily trivial in SinhSpherical coordinates. See above.
xx_to_Cart[0] = xxSph[0]*sp.sin(xxSph[1])*sp.cos(xxSph[2])
xx_to_Cart[1] = xxSph[0]*sp.sin(xxSph[1])*sp.sin(xxSph[2])
xx_to_Cart[2] = xxSph[0]*sp.cos(xxSph[1])
scalefactor_orthog[0] = sp.diff(xxSph[0],xx[0])
scalefactor_orthog[1] = xxSph[0]
scalefactor_orthog[2] = xxSph[0]*sp.sin(xxSph[1])
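        # The corresponding reference metric (constructed later in ref_metric__hatted_quantities())
        # is ghatDD = diag( (dr/dxx0)^2, r^2, r^2 sin^2(xx1) ); for plain Spherical (r = xx0)
        # this reduces to diag(1, r^2, r^2 sin^2(theta)).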
f0_of_xx0 = xxSph[0]
f1_of_xx1 = sp.sin(xxSph[1])
scalefactor_orthog_funcform[0] = sp.diff(f0_of_xx0_funcform,xx[0])
scalefactor_orthog_funcform[1] = f0_of_xx0_funcform
scalefactor_orthog_funcform[2] = f0_of_xx0_funcform*f1_of_xx1_funcform
# Set the unit vectors
UnitVectors = [[sp.sin(xxSph[1])*sp.cos(xxSph[2]), sp.sin(xxSph[1])*sp.sin(xxSph[2]), sp.cos(xxSph[1])],
[sp.cos(xxSph[1])*sp.cos(xxSph[2]), sp.cos(xxSph[1])*sp.sin(xxSph[2]), -sp.sin(xxSph[1])],
[ -sp.sin(xxSph[2]), sp.cos(xxSph[2]), sp.sympify(0) ]]
######################################################################
# SPHERICAL-LIKE COORDINATE SYSTEMS WITH RADIAL AND THETA RESCALINGS #
######################################################################
elif CoordSystem in ('NobleSphericalThetaOptionOne', 'NobleSphericalThetaOptionTwo'):
# WARNING: CANNOT BE USED FOR SENR RUNS;
# THESE DO NOT DEFINE xxmin, xxmax, Cart_to_xx
# ALSO THE RADIAL RESCALINGS ARE NOT ODD FUNCTIONS OF xx0,
# MEANING THAT CURVI. BOUNDARY CONDITIONS WILL NOT WORK.
Rin,R0 = par.Cparameters("REAL", thismodule, ["Rin","R0"],[1.08986052555408,0.0])
x0beg = sp.log(Rin-R0)
xx[0] = sp.symbols("xx0", real=True)
r = R0 + sp.exp(x0beg + xx[0])
# 0.053407075111026485 == 0.017*pi
th_c,xi,x1beg = par.Cparameters("REAL", thismodule, ["th_c","xi","x1beg"],[0.053407075111026485,0.25,0.0])
xx[1] = sp.symbols("xx1", real=True)
x1j = x1beg + xx[1]
if CoordSystem == "NobleSphericalThetaOptionOne":
th = th_c + (M_PI - 2*th_c)*x1j + xi*sp.sin(2*M_PI*x1j)
elif CoordSystem == "NobleSphericalThetaOptionTwo":
x1_n_exponent = par.Cparameters("REAL", thismodule, ["x1_n_exponent"],9.0)
th = M_PI/2 * ( 1 + (1 - xi)*(2*x1j - 1) + (xi - 2*th_c/M_PI)*(2*x1j - 1)**x1_n_exponent )
xx[2] = sp.symbols("xx2", real=True)
ph = xx[2]
xxSph[0] = r
xxSph[1] = th
xxSph[2] = ph
# Now define xCart, yCart, and zCart in terms of x0,xx[1],xx[2].
        # Note that the relation between r and x0 is not necessarily trivial in these coordinates. See above.
xx_to_Cart[0] = xxSph[0]*sp.sin(xxSph[1])*sp.cos(xxSph[2])
xx_to_Cart[1] = xxSph[0]*sp.sin(xxSph[1])*sp.sin(xxSph[2])
xx_to_Cart[2] = xxSph[0]*sp.cos(xxSph[1])
scalefactor_orthog[0] = sp.diff(xxSph[0],xx[0])
scalefactor_orthog[1] = xxSph[0]
scalefactor_orthog[2] = xxSph[0]*sp.sin(xxSph[1])
# Set the unit vectors
UnitVectors = [[ sp.sin(xxSph[1])*sp.cos(xxSph[2]), sp.sin(xxSph[1])*sp.sin(xxSph[2]), sp.cos(xxSph[1])],
[ sp.cos(xxSph[1])*sp.cos(xxSph[2]), sp.cos(xxSph[1])*sp.sin(xxSph[2]), -sp.sin(xxSph[1])],
[ -sp.sin(xxSph[2]), sp.cos(xxSph[2]), sp.sympify(0) ]]
##########################################################################
# CYLINDRICAL-LIKE COORDINATE SYSTEMS WITH & WITHOUT RADIAL/Z RESCALINGS #
##########################################################################
elif CoordSystem in ('Cylindrical', 'SinhCylindrical', 'SinhCylindricalv2'):
# Assuming the cylindrical radial coordinate
# is positive makes nice simplifications of
# unit vectors possible.
xx[0] = sp.symbols("xx0", real=True)
if CoordSystem == "Cylindrical":
RHOMAX,ZMIN,ZMAX = par.Cparameters("REAL",thismodule,["RHOMAX","ZMIN","ZMAX"],[10.0,-10.0,10.0])
xxmin = [sp.sympify(0), -M_PI, ZMIN]
xxmax = [ RHOMAX, M_PI, ZMAX]
RHOCYL = xx[0]
PHICYL = xx[1]
ZCYL = xx[2]
Cart_to_xx[0] = sp.sqrt(Cartx ** 2 + Carty ** 2)
Cart_to_xx[1] = sp.atan2(Carty, Cartx)
Cart_to_xx[2] = Cartz
elif CoordSystem == "SinhCylindrical":
xxmin = [sp.sympify(0), -M_PI, sp.sympify(-1)]
xxmax = [sp.sympify(1), M_PI, sp.sympify(+1)]
AMPLRHO, SINHWRHO, AMPLZ, SINHWZ = par.Cparameters("REAL",thismodule,
["AMPLRHO","SINHWRHO","AMPLZ","SINHWZ"],
[ 10.0, 0.2, 10.0, 0.2])
# Set SinhCylindrical radial & z coordinates by default; overwrite later if CoordSystem == "SinhCylindricalv2".
RHOCYL = AMPLRHO * (sp.exp(xx[0] / SINHWRHO) - sp.exp(-xx[0] / SINHWRHO)) / (sp.exp(1 / SINHWRHO) - sp.exp(-1 / SINHWRHO))
# phi coordinate remains unchanged.
PHICYL = xx[1]
ZCYL = AMPLZ * (sp.exp(xx[2] / SINHWZ) - sp.exp(-xx[2] / SINHWZ)) / (sp.exp(1 / SINHWZ) - sp.exp(-1 / SINHWZ))
Cart_to_xx[0] = SINHWRHO*sp.asinh(sp.sqrt(Cartx ** 2 + Carty ** 2)*sp.sinh(1/SINHWRHO)/AMPLRHO)
Cart_to_xx[1] = sp.atan2(Carty, Cartx)
Cart_to_xx[2] = SINHWZ*sp.asinh(Cartz*sp.sinh(1/SINHWZ)/AMPLZ)
# SinhCylindricalv2 adds the parameters "const_drho", "const_dz", which allows for regions near xx[0]=0
# and xx[2]=0 to have constant rho and z resolution of const_drho and const_dz, provided the sinh() terms
# do not dominate near xx[0]=0 and xx[2]=0.
elif CoordSystem == "SinhCylindricalv2":
xxmin = [sp.sympify(0), -M_PI, sp.sympify(-1)]
xxmax = [sp.sympify(1), M_PI, sp.sympify(+1)]
AMPLRHO, SINHWRHO, AMPLZ, SINHWZ = par.Cparameters("REAL", thismodule,
["AMPLRHO", "SINHWRHO", "AMPLZ", "SINHWZ"],
[ 10.0, 0.2, 10.0, 0.2])
const_drho, const_dz = par.Cparameters("REAL", thismodule, ["const_drho", "const_dz"], [0.0625, 0.0625])
RHOCYL = AMPLRHO * ( const_drho*xx[0] + (sp.exp(xx[0] / SINHWRHO) - sp.exp(-xx[0] / SINHWRHO)) / (sp.exp(1 / SINHWRHO) - sp.exp(-1 / SINHWRHO)) )
PHICYL = xx[1]
ZCYL = AMPLZ * ( const_dz *xx[2] + (sp.exp(xx[2] / SINHWZ ) - sp.exp(-xx[2] / SINHWZ )) / (sp.exp(1 / SINHWZ ) - sp.exp(-1 / SINHWZ )) )
# NO CLOSED-FORM EXPRESSION FOR RADIAL OR Z INVERSION.
# Cart_to_xx[0] = "NewtonRaphson"
# Cart_to_xx[1] = sp.atan2(Carty, Cartx)
# Cart_to_xx[2] = "NewtonRaphson"
xx_to_Cart[0] = RHOCYL*sp.cos(PHICYL)
xx_to_Cart[1] = RHOCYL*sp.sin(PHICYL)
xx_to_Cart[2] = ZCYL
xxSph[0] = sp.sqrt(RHOCYL**2 + ZCYL**2)
xxSph[1] = sp.acos(ZCYL / xxSph[0])
xxSph[2] = PHICYL
scalefactor_orthog[0] = sp.diff(RHOCYL,xx[0])
scalefactor_orthog[1] = RHOCYL
scalefactor_orthog[2] = sp.diff(ZCYL,xx[2])
f0_of_xx0 = RHOCYL
f4_of_xx2 = sp.diff(ZCYL,xx[2])
scalefactor_orthog_funcform[0] = sp.diff(f0_of_xx0_funcform,xx[0])
scalefactor_orthog_funcform[1] = f0_of_xx0_funcform
scalefactor_orthog_funcform[2] = f4_of_xx2_funcform
# Set the unit vectors
UnitVectors = [[ sp.cos(PHICYL), sp.sin(PHICYL), sp.sympify(0)],
[-sp.sin(PHICYL), sp.cos(PHICYL), sp.sympify(0)],
[ sp.sympify(0), sp.sympify(0), sp.sympify(1)]]
elif CoordSystem in ('SymTP', 'SinhSymTP'):
# var1, var2= sp.symbols('var1 var2',real=True)
bScale, SINHWAA, AMAX = par.Cparameters("REAL", thismodule,
["bScale", "SINHWAA", "AMAX"],
[0.5, 0.2, 10.0 ])
# Assuming xx0, xx1, and bScale
# are positive makes nice simplifications of
# unit vectors possible.
xx[0],xx[1] = sp.symbols("xx0 xx1", real=True)
xxmin = [sp.sympify(0), sp.sympify(0), -M_PI]
xxmax = [ AMAX, M_PI, M_PI]
AA = xx[0]
if CoordSystem == "SinhSymTP":
xxmax[0] = sp.sympify(1)
# With xxmax[0] = 1, sinh(xx0/SINHWAA) / sinh(1/SINHWAA) will evaluate to a number between 0 and 1.
# Then AA = AMAX * sinh(xx0/SINHWAA) / sinh(1/SINHWAA) will evaluate to a number between 0 and AMAX.
AA = AMAX * (sp.exp(xx[0] / SINHWAA) - sp.exp(-xx[0] / SINHWAA)) / (sp.exp(1 / SINHWAA) - sp.exp(-1 / SINHWAA))
var1 = sp.sqrt(AA**2 + (bScale * sp.sin(xx[1]))**2)
var2 = sp.sqrt(AA**2 + bScale**2)
RHOSYMTP = AA*sp.sin(xx[1])
PHSYMTP = xx[2]
ZSYMTP = var2*sp.cos(xx[1])
xx_to_Cart[0] = AA *sp.sin(xx[1])*sp.cos(xx[2])
xx_to_Cart[1] = AA *sp.sin(xx[1])*sp.sin(xx[2])
xx_to_Cart[2] = ZSYMTP
xxSph[0] = sp.sqrt(RHOSYMTP**2 + ZSYMTP**2)
xxSph[1] = sp.acos(ZSYMTP / xxSph[0])
xxSph[2] = PHSYMTP
if CoordSystem == "SymTP":
rSph = sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2)
thSph = sp.acos(Cartz / rSph)
phSph = sp.atan2(Carty, Cartx)
# Mathematica script to compute Cart_to_xx[]
# AA = x1;
# var2 = Sqrt[AA^2 + bScale^2];
# RHOSYMTP = AA*Sin[x2];
# ZSYMTP = var2*Cos[x2];
# Solve[{rSph == Sqrt[RHOSYMTP^2 + ZSYMTP^2],
# thSph == ArcCos[ZSYMTP/Sqrt[RHOSYMTP^2 + ZSYMTP^2]],
# phSph == x3},
# {x1, x2, x3}]
Cart_to_xx[0] = sp.sqrt(-bScale**2 + rSph**2 +
sp.sqrt(bScale**4 + 2*bScale**2*rSph**2 + rSph**4 -
4*bScale**2*rSph**2*sp.cos(thSph)**2))*M_SQRT1_2 # M_SQRT1_2 = 1/sqrt(2); define this way for UnitTesting
# The sign() function in the following expression ensures the correct root is taken.
Cart_to_xx[1] = sp.acos(sp.sign(Cartz)*(
sp.sqrt(1 + rSph**2/bScale**2 -
sp.sqrt(bScale**4 + 2*bScale**2*rSph**2 + rSph**4 -
4*bScale**2*rSph**2*sp.cos(thSph)**2)/bScale**2)*M_SQRT1_2)) # M_SQRT1_2 = 1/sqrt(2); define this way for UnitTesting
Cart_to_xx[2] = phSph
elif CoordSystem == "SinhSymTP":
rSph = sp.sqrt(Cartx ** 2 + Carty ** 2 + Cartz ** 2)
thSph = sp.acos(Cartz / rSph)
phSph = sp.atan2(Carty, Cartx)
# Mathematica script to compute Cart_to_xx[]
# AA = x1;
# var2 = Sqrt[AA^2 + bScale^2];
# RHOSYMTP = AA*Sin[x2];
# ZSYMTP = var2*Cos[x2];
# Solve[{rSph == Sqrt[RHOSYMTP^2 + ZSYMTP^2],
# thSph == ArcCos[ZSYMTP/Sqrt[RHOSYMTP^2 + ZSYMTP^2]],
# phSph == x3},
# {x1, x2, x3}]
Cart_to_xx[0] = sp.sqrt(-bScale**2 + rSph**2 +
sp.sqrt(bScale**4 + 2*bScale**2*rSph**2 + rSph**4 -
4*bScale**2*rSph**2*sp.cos(thSph)**2))*M_SQRT1_2 # M_SQRT1_2 = 1/sqrt(2); define this way for UnitTesting
# The sign() function in the following expression ensures the correct root is taken.
Cart_to_xx[1] = sp.acos(sp.sign(Cartz)*(
sp.sqrt(1 + rSph**2/bScale**2 -
sp.sqrt(bScale**4 + 2*bScale**2*rSph**2 + rSph**4 -
4*bScale**2*rSph**2*sp.cos(thSph)**2)/bScale**2)*M_SQRT1_2)) # M_SQRT1_2 = 1/sqrt(2); define this way for UnitTesting
Cart_to_xx[2] = phSph
scalefactor_orthog[0] = sp.diff(AA,xx[0]) * var1 / var2
scalefactor_orthog[1] = var1
scalefactor_orthog[2] = AA * sp.sin(xx[1])
f0_of_xx0 = AA
f1_of_xx1 = sp.sin(xx[1])
f2_of_xx0_xx1 = var1
f3_of_xx0 = var2
scalefactor_orthog_funcform[0] = sp.diff(f0_of_xx0_funcform,xx[0]) * f2_of_xx0_xx1_funcform/f3_of_xx0_funcform
scalefactor_orthog_funcform[1] = f2_of_xx0_xx1_funcform
scalefactor_orthog_funcform[2] = f0_of_xx0_funcform*f1_of_xx1_funcform
# Set the transpose of the matrix of unit vectors
UnitVectors = [[sp.sin(xx[1]) * sp.cos(xx[2]) * var2 / var1,
sp.sin(xx[1]) * sp.sin(xx[2]) * var2 / var1,
AA * sp.cos(xx[1]) / var1],
[AA * sp.cos(xx[1]) * sp.cos(xx[2]) / var1,
AA * sp.cos(xx[1]) * sp.sin(xx[2]) / var1,
-sp.sin(xx[1]) * var2 / var1],
[-sp.sin(xx[2]), sp.cos(xx[2]), sp.sympify(0)]]
#####################################
# CARTESIAN-LIKE COORDINATE SYSTEMS #
#####################################
elif CoordSystem == "Cartesian":
# return values of par.Cparameters() in the following line of code are unused, so we ignore them.
par.Cparameters("REAL",thismodule, ["xmin","xmax","ymin","ymax","zmin","zmax"],
[ -10.0, 10.0, -10.0, 10.0, -10.0, 10.0])
xxmin = ["xmin", "ymin", "zmin"]
xxmax = ["xmax", "ymax", "zmax"]
xx_to_Cart[0] = xx[0]
xx_to_Cart[1] = xx[1]
xx_to_Cart[2] = xx[2]
xxSph[0] = sp.sqrt(xx[0] ** 2 + xx[1] ** 2 + xx[2] ** 2)
xxSph[1] = sp.acos(xx[2] / xxSph[0])
xxSph[2] = sp.atan2(xx[1], xx[0])
Cart_to_xx[0] = Cartx
Cart_to_xx[1] = Carty
Cart_to_xx[2] = Cartz
scalefactor_orthog[0] = sp.sympify(1)
scalefactor_orthog[1] = sp.sympify(1)
scalefactor_orthog[2] = sp.sympify(1)
scalefactor_orthog_funcform[0] = sp.sympify(1)
scalefactor_orthog_funcform[1] = sp.sympify(1)
scalefactor_orthog_funcform[2] = sp.sympify(1)
# Set the transpose of the matrix of unit vectors
UnitVectors = [[sp.sympify(1), sp.sympify(0), sp.sympify(0)],
[sp.sympify(0), sp.sympify(1), sp.sympify(0)],
[sp.sympify(0), sp.sympify(0), sp.sympify(1)]]
elif CoordSystem == "SinhCartesian":
# SinhCartesian coordinates allows us to push the outer boundary of the
# computational domain a lot further away, while keeping reasonably high
# resolution towards the center of the computational grid.
# Set default values for min and max (x,y,z)
xxmin = [sp.sympify(-1), sp.sympify(-1), sp.sympify(-1)]
xxmax = [sp.sympify(+1), sp.sympify(+1), sp.sympify(+1)]
# Declare basic parameters of the coordinate system and their default values
AMPLXYZ, SINHWXYZ = par.Cparameters("REAL", thismodule,
["AMPLXYZ", "SINHWXYZ"],
[ 10.0, 0.2])
# Compute (xx_to_Cart0,xx_to_Cart1,xx_to_Cart2) from (xx0,xx1,xx2)
for ii in [0, 1, 2]:
xx_to_Cart[ii] = AMPLXYZ*(sp.exp(xx[ii]/SINHWXYZ) - sp.exp(-xx[ii]/SINHWXYZ))/(sp.exp(1/SINHWXYZ) - sp.exp(-1/SINHWXYZ))
# Compute (r,th,ph) from (xx_to_Cart2,xx_to_Cart1,xx_to_Cart2)
xxSph[0] = sp.sqrt(xx_to_Cart[0] ** 2 + xx_to_Cart[1] ** 2 + xx_to_Cart[2] ** 2)
xxSph[1] = sp.acos(xx_to_Cart[2] / xxSph[0])
xxSph[2] = sp.atan2(xx_to_Cart[1], xx_to_Cart[0])
# Compute (xx0,xx1,xx2) from (Cartx,Carty,Cartz)
Cart_to_xx[0] = SINHWXYZ*sp.asinh(Cartx*sp.sinh(1/SINHWXYZ)/AMPLXYZ)
Cart_to_xx[1] = SINHWXYZ*sp.asinh(Carty*sp.sinh(1/SINHWXYZ)/AMPLXYZ)
Cart_to_xx[2] = SINHWXYZ*sp.asinh(Cartz*sp.sinh(1/SINHWXYZ)/AMPLXYZ)
# Compute scale factors
scalefactor_orthog[0] = sp.diff(xx_to_Cart[0], xx[0])
scalefactor_orthog[1] = sp.diff(xx_to_Cart[1], xx[1])
scalefactor_orthog[2] = sp.diff(xx_to_Cart[2], xx[2])
f0_of_xx0 = sp.diff(xx_to_Cart[0], xx[0])
f1_of_xx1 = sp.diff(xx_to_Cart[1], xx[1])
f4_of_xx2 = sp.diff(xx_to_Cart[2], xx[2])
scalefactor_orthog_funcform[0] = f0_of_xx0_funcform
scalefactor_orthog_funcform[1] = f1_of_xx1_funcform
scalefactor_orthog_funcform[2] = f4_of_xx2_funcform
# Set the transpose of the matrix of unit vectors
UnitVectors = [[sp.sympify(1), sp.sympify(0), sp.sympify(0)],
[sp.sympify(0), sp.sympify(1), sp.sympify(0)],
[sp.sympify(0), sp.sympify(0), sp.sympify(1)]]
else:
print("CoordSystem == " + CoordSystem + " is not supported.")
sys.exit(1)
# Finally, call ref_metric__hatted_quantities()
# to construct hatted metric, derivs of hatted
# metric, and Christoffel symbols
ref_metric__hatted_quantities(SymPySimplifyExpressions)
# ref_metric__hatted_quantities(scalefactor_orthog_funcform,SymPySimplifyExpressions)
# ref_metric__hatted_quantities(scalefactor_orthog,SymPySimplifyExpressions)
def ref_metric__hatted_quantities(SymPySimplifyExpressions=True):
enable_rfm_precompute = False
if par.parval_from_str(thismodule+"::enable_rfm_precompute") == "True":
enable_rfm_precompute = True
# Step 0: Set dimension DIM
DIM = par.parval_from_str("grid::DIM")
global ReU, ReDD, ghatDD
ReU = ixp.zerorank1()
ReDD = ixp.zerorank2()
ghatDD = ixp.zerorank2()
# Step 1: Compute ghatDD (reference metric), ghatUU
# (inverse reference metric), as well as
# rescaling vector ReU & rescaling matrix ReDD
if not enable_rfm_precompute:
for i in range(DIM):
scalefactor_orthog[i] = sp.sympify(scalefactor_orthog[i])
ghatDD[i][i] = scalefactor_orthog[i]**2
ReU[i] = 1/scalefactor_orthog[i]
for j in range(DIM):
ReDD[i][j] = scalefactor_orthog[i]*scalefactor_orthog[j]
else:
for i in range(DIM):
scalefactor_orthog_funcform[i] = sp.sympify(scalefactor_orthog_funcform[i])
ghatDD[i][i] = scalefactor_orthog_funcform[i]**2
ReU[i] = 1/scalefactor_orthog_funcform[i]
for j in range(DIM):
ReDD[i][j] = scalefactor_orthog_funcform[i]*scalefactor_orthog_funcform[j]
# Step 1b: Compute ghatUU and detgammahat
global ghatUU
global detgammahat
ghatUU = ixp.zerorank2()
ghatUU, detgammahat = ixp.symm_matrix_inverter3x3(ghatDD)
# Step 1c: Sanity check: verify that ReDD, ghatDD,
# and ghatUU are all symmetric rank-2:
for i in range(DIM):
for j in range(DIM):
if ReDD[i][j] != ReDD[j][i]:
print("Error: ReDD["+ str(i) + "][" + str(j) + "] != ReDD["+ str(j) + "][" + str(i) + ": " + str(ReDD[i][j]) + "!=" + str(ReDD[j][i]))
sys.exit(1)
if ghatDD[i][j] != ghatDD[j][i]:
print("Error: ghatDD["+ str(i) + "][" + str(j) + "] != ghatDD["+ str(j) + "][" + str(i) + ": " + str(ghatDD[i][j]) + "!=" + str(ghatDD[j][i]))
sys.exit(1)
if ghatUU[i][j] != ghatUU[j][i]:
print("Error: ghatUU["+ str(i) + "][" + str(j) + "] != ghatUU["+ str(j) + "][" + str(i) + ": " + str(ghatUU[i][j]) + "!=" + str(ghatUU[j][i]))
sys.exit(1)
# Step 2: Compute det(ghat) and its 1st & 2nd derivatives
global detgammahatdD, detgammahatdDD
detgammahatdD = ixp.zerorank1(DIM)
detgammahatdDD = ixp.zerorank2(DIM)
for i in range(DIM):
detgammahatdD[i] = (sp.diff(detgammahat, xx[i]))
for j in range(DIM):
detgammahatdDD[i][j] = sp.diff(detgammahatdD[i], xx[j])
# Step 3a: Compute 1st & 2nd derivatives of rescaling vector.
# (E.g., needed in BSSN for betaUdDD computation)
global ReUdD, ReUdDD
ReUdD = ixp.zerorank2(DIM)
ReUdDD = ixp.zerorank3(DIM)
for i in range(DIM):
for j in range(DIM):
ReUdD[i][j] = sp.diff(ReU[i], xx[j])
for k in range(DIM):
ReUdDD[i][j][k] = sp.diff(ReUdD[i][j], xx[k])
# Step 3b: Compute 1st & 2nd derivatives of rescaling matrix.
global ReDDdD, ReDDdDD
ReDDdD = ixp.zerorank3(DIM)
ReDDdDD = ixp.zerorank4(DIM)
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
ReDDdD[i][j][k] = (sp.diff(ReDD[i][j], xx[k]))
for l in range(DIM):
# Simplifying this doesn't appear to help overall NRPy run time.
ReDDdDD[i][j][k][l] = sp.diff(ReDDdD[i][j][k], xx[l])
# Step 3c: Compute 1st & 2nd derivatives of reference metric.
global ghatDDdD, ghatDDdDD
ghatDDdD = ixp.zerorank3(DIM)
ghatDDdDD = ixp.zerorank4(DIM)
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
if SymPySimplifyExpressions==True:
# ghatDDdD[i][j][k] = sp.trigsimp(sp.diff(ghatDD[i][j], xx[k])) # FIXME: BAD: MUST BE SIMPLIFIED OR ANSWER IS INCORRECT! Must be some bug in sympy...
ghatDDdD[i][j][k] = sp.simplify(sp.diff(ghatDD[i][j], xx[k])) # FIXME: BAD: MUST BE SIMPLIFIED OR ANSWER IS INCORRECT! Must be some bug in sympy...
else:
ghatDDdD[i][j][k] = (sp.diff(ghatDD[i][j], xx[k])) # FIXME: BAD: MUST BE SIMPLIFIED OR ANSWER IS INCORRECT! Must be some bug in sympy...
for l in range(DIM):
ghatDDdDD[i][j][k][l] = (sp.diff(ghatDDdD[i][j][k], xx[l]))
# Step 4a: Compute Christoffel symbols of reference metric.
global GammahatUDD
GammahatUDD = ixp.zerorank3(DIM)
for i in range(DIM):
for k in range(DIM):
for l in range(DIM):
for m in range(DIM):
# GammahatUDD[i][k][l] += sp.trigsimp((sp.Rational(1,2))*ghatUU[i][m]*\
GammahatUDD[i][k][l] += (sp.Rational(1, 2))*ghatUU[i][m]*\
(ghatDDdD[m][k][l] + ghatDDdD[m][l][k] - ghatDDdD[k][l][m])
# Step 4b: Compute derivs of Christoffel symbols of reference metric.
global GammahatUDDdD
GammahatUDDdD = ixp.zerorank4(DIM)
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
for l in range(DIM):
GammahatUDDdD[i][j][k][l] = (sp.diff(GammahatUDD[i][j][k], xx[l]))
# Step 4c: If rfm_precompute is disabled, then we are finished with this function.
# Otherwise continue to Step 5.
if not enable_rfm_precompute:
return
# Step 5: Now that all hatted quantities are written in terms of generic SymPy functions,
# we will now replace SymPy functions with simple variables using rigid NRPy+ syntax,
# and store these variables to globals defined above.
def make_replacements(expr):
sympy_version = sp.__version__.replace("rc", "...").replace("b", "...") # Ignore the rc's and b's
# (release candidates & betas).
sympy_version_decimal = float(int(sympy_version.split(".")[0]) + int(sympy_version.split(".")[1]) / 10.0)
is_old_sympy_version = sympy_version_decimal < 1.2
# The derivative representation changed with SymPy 1.2, forcing version-dependent behavior.
# Example: Derivative(f0_of_xx0_funcform(xx0)(xx0), (xx0, 2)) >> f0_of_xx0__DD00
rule = {} # replacement dictionary
for item in sp.preorder_traversal(expr):
if item.func == sp.Derivative:
# extract function name before '_funcform'
strfunc = str(item.args[0]).split('_funcform(', 1)[0]
if is_old_sympy_version:
# extract differentiation variable and derivative order (SymPy <= 1.1)
var, order = str(item.args[1])[2:], len(item.args) - 1
else:
# extract differentiation variable and derivative order (SymPy >= 1.2)
var, order = str(item.args[1][0])[2:], item.args[1][1]
# build derivative operator with format: __DD...D(var)(var)...(var) where
# D and (var) are repeated for every derivative order
oper = '__D' + 'D' * (order - 1) + var * order
# add replacement rule to dictionary
rule[item] = sp.sympify(strfunc + oper)
expr = expr.xreplace(rule)
rule = {}
# Example: f0_of_xx0_funcform(xx0)(xx0) >> f0_of_xx0
for item in sp.preorder_traversal(expr):
if "_funcform" in str(item.func):
# extract function name before '_funcform'
strfunc = str(item.func).split("_funcform", 1)[0]
# add replacement rule to dictionary
rule[item] = sp.sympify(strfunc)
return expr.xreplace(rule)
# Step 6: enable_rfm_precompute: precompute and store in memory
# expressions related to the reference metric (a.k.a.,
# "hatted quantities"). These expressions may involve
# transcendental functions, which are expensive to compute
# within nested loops in C. Hence we precompute them and
# store the result.
# The precomputed "hatted quantity" expressions will be stored in
# a C struct called rfmstruct. As these expressions generally
# involve computationally expensive transcendental functions
# of xx0,xx1,or xx2, and xx0,xx1, and xx2 remain fixed across
# most (if not all) of a given simulation, setting up the
# rfmstruct can greatly improve performance.
# The core challenge in setting up the rfmstruct is collecting
# all the information needed to automatically generate it.
    #    The steps below implement this algorithm, using the
# *generic functional form* of the hatted quantities (as
# opposed to the exact closed-form expressions of the
# hatted quantities) computed above.
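    # Illustrative example (assumed, for CoordSystem=="SinhSpherical"): the generated struct
    # typically contains 1D arrays such as
    #   typedef struct __rfmstruct__ {
    #     REAL *restrict f0_of_xx0;      // r(xx0), sampled along xx0
    #     REAL *restrict f0_of_xx0__D0;  // dr/dxx0, sampled along xx0
    #     REAL *restrict f1_of_xx1;      // sin(xx1), sampled along xx1
    #     ...
    #   } rfm_struct;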
detgammahat = make_replacements(detgammahat)
for i in range(DIM):
ReU[i] = make_replacements(ReU[i])
detgammahatdD[i] = make_replacements(detgammahatdD[i])
for j in range(DIM):
ReDD[i][j] = make_replacements(ReDD[i][j])
ReUdD[i][j] = make_replacements(ReUdD[i][j])
ghatDD[i][j] = make_replacements(ghatDD[i][j])
ghatUU[i][j] = make_replacements(ghatUU[i][j])
detgammahatdDD[i][j] = make_replacements(detgammahatdDD[i][j])
for k in range(DIM):
ReDDdD[i][j][k] = make_replacements(ReDDdD[i][j][k])
ReUdDD[i][j][k] = make_replacements(ReUdDD[i][j][k])
ghatDDdD[i][j][k] = make_replacements(ghatDDdD[i][j][k])
GammahatUDD[i][j][k] = make_replacements(GammahatUDD[i][j][k])
for l in range(DIM):
ReDDdDD[i][j][k][l] = make_replacements(ReDDdDD[i][j][k][l])
ghatDDdDD[i][j][k][l] = make_replacements(ghatDDdDD[i][j][k][l])
GammahatUDDdD[i][j][k][l] = make_replacements(GammahatUDDdD[i][j][k][l])
# Step 6: At this point, each expression is written in terms of the generic functions
# of xx0, xx1, and/or xx2 and their derivatives. Depending on the functions, some
    #  of these derivatives may be zero. In Step 6.b below we'll evaluate each function
    #  derivative exactly and, where it vanishes, set the corresponding expression to zero.
    #  Otherwise in the C code we'd be storing and performing arithmetic with zeros -- wasteful!
# Step 6.a: Construct the full list of *unique* NRPy+ variables representing the
# SymPy functions and derivatives, so that all zero derivatives can be
# computed.
freevars = []
freevars.extend(detgammahat.free_symbols)
for i in range(DIM):
freevars.extend(ReU[i].free_symbols)
freevars.extend(detgammahatdD[i].free_symbols)
for j in range(DIM):
freevars.extend(ReDD[i][j].free_symbols)
freevars.extend(ReUdD[i][j].free_symbols)
freevars.extend(ghatDD[i][j].free_symbols)
freevars.extend(ghatUU[i][j].free_symbols)
freevars.extend(detgammahatdDD[i][j].free_symbols)
for k in range(DIM):
freevars.extend(ReDDdD[i][j][k].free_symbols)
freevars.extend(ReUdDD[i][j][k].free_symbols)
freevars.extend(ghatDDdD[i][j][k].free_symbols)
freevars.extend(GammahatUDD[i][j][k].free_symbols)
for l in range(DIM):
freevars.extend(ReDDdDD[i][j][k][l].free_symbols)
freevars.extend(ghatDDdDD[i][j][k][l].free_symbols)
freevars.extend(GammahatUDDdD[i][j][k][l].free_symbols)
freevars_uniq = superfast_uniq(freevars)
freevars_uniq_xx_indep = []
for i in range(len(freevars_uniq)):
freevars_uniq_xx_indep.append(freevars_uniq[i])
# Step 6.b: Using the expressions f?_of_xx? set in reference_metric(),
# evaluate each needed derivative and, in the case it is zero,
# set the corresponding "freevar" variable to zero.
freevars_uniq_vals = []
for i, var in enumerate(freevars_uniq):
basename = str(var).split("__")[0].replace("_funcform", "")
derivatv = ""
if "__" in str(var):
derivatv = str(var).split("__")[1].replace("_funcform", "")
if basename == "f0_of_xx0":
basefunc = f0_of_xx0
elif basename == "f1_of_xx1":
basefunc = f1_of_xx1
elif basename == "f2_of_xx0_xx1":
basefunc = f2_of_xx0_xx1
elif basename == "f3_of_xx0":
basefunc = f3_of_xx0
elif basename == "f4_of_xx2":
basefunc = f4_of_xx2
else:
print("Error: function inside " + str(var) + " undefined.")
sys.exit(1)
diff_result = basefunc
if derivatv == "":
pass
else:
derivorder = derivatv.replace("d", "").replace("D", "").replace("0", "0 ").replace("1", "1 ").replace(
"2", "2 ").split(" ")
for derivdirn in derivorder:
if derivdirn != "":
derivwrt = xx[int(derivdirn)]
diff_result = sp.diff(diff_result, derivwrt)
freevars_uniq_vals.append(diff_result)
frees_uniq = superfast_uniq(diff_result.free_symbols)
has_xx_dependence = False
for dirn in range(3):
if gri.xx[dirn] in frees_uniq:
has_xx_dependence = True
if not has_xx_dependence:
freevars_uniq_xx_indep[i] = diff_result
# Step 6.c: Finally, substitute integers for all functions & derivatives that evaluate to integers
for varidx, freevar in enumerate(freevars_uniq):
detgammahat = detgammahat.subs(freevar, freevars_uniq_xx_indep[varidx])
for i in range(DIM):
ReU[i] = ReU[i].subs(freevar, freevars_uniq_xx_indep[varidx])
detgammahatdD[i] = detgammahatdD[i].subs(freevar, freevars_uniq_xx_indep[varidx])
for j in range(DIM):
ReDD[i][j] = ReDD[i][j].subs(freevar, freevars_uniq_xx_indep[varidx])
ReUdD[i][j] = ReUdD[i][j].subs(freevar, freevars_uniq_xx_indep[varidx])
ghatDD[i][j] = ghatDD[i][j].subs(freevar, freevars_uniq_xx_indep[varidx])
ghatUU[i][j] = ghatUU[i][j].subs(freevar, freevars_uniq_xx_indep[varidx])
detgammahatdDD[i][j] = detgammahatdDD[i][j].subs(freevar,
freevars_uniq_xx_indep[varidx])
for k in range(DIM):
ReDDdD[i][j][k] = ReDDdD[i][j][k].subs(freevar, freevars_uniq_xx_indep[varidx])
ReUdDD[i][j][k] = ReUdDD[i][j][k].subs(freevar, freevars_uniq_xx_indep[varidx])
ghatDDdD[i][j][k] = ghatDDdD[i][j][k].subs(freevar, freevars_uniq_xx_indep[varidx])
GammahatUDD[i][j][k] = GammahatUDD[i][j][k].subs(freevar,
freevars_uniq_xx_indep[varidx])
for l in range(DIM):
ReDDdDD[i][j][k][l] = ReDDdDD[i][j][k][l].subs(freevar,
freevars_uniq_xx_indep[varidx])
ghatDDdDD[i][j][k][l] = ghatDDdDD[i][j][k][l].subs(freevar,
freevars_uniq_xx_indep[varidx])
GammahatUDDdD[i][j][k][l] = GammahatUDDdD[i][j][k][l].subs(freevar,
freevars_uniq_xx_indep[varidx])
# Step 7: Construct needed C code for declaring rfmstruct, allocating storage for
# rfmstruct arrays, defining each element in each array, reading the
# rfmstruct data from memory (both with and without SIMD enabled), and
# freeing allocated memory for the rfmstruct arrays.
# struct_str: String that declares the rfmstruct struct.
struct_str = "typedef struct __rfmstruct__ {\n"
define_str = ""
# rfmstruct stores pointers to (so far) 1D arrays. The malloc_str string allocates space for the arrays.
malloc_str = ""
if par.parval_from_str("reference_metric::rfm_precompute_to_Cfunctions_and_NRPy_basic_defines") == "False":
malloc_str = "rfm_struct rfmstruct;\n"
freemm_str = ""
# readvr_str reads the arrays from memory as needed
readvr_str = ["", "", ""]
readvr_SIMD_outer_str = ["", "", ""]
readvr_SIMD_inner_str = ["", "", ""]
# Sort freevars_uniq_vals and freevars_uniq_xx_indep, according to alphabetized freevars_uniq_xx_indep.
# Without this step, the ordering of elements in rfmstruct would be random, and would change each time
# this function was called.
if len(freevars_uniq_xx_indep) > 0:
freevars_uniq_xx_indep, freevars_uniq_vals = (list(x) for x in zip(*sorted(zip(freevars_uniq_xx_indep, freevars_uniq_vals),key=str)))
    # Tease out how many xx-coordinates each function in freevars_uniq_vals depends on
which_freevar = 0
for expr in freevars_uniq_vals:
if "_of_xx" in str(freevars_uniq_xx_indep[which_freevar]):
frees = expr.free_symbols
frees_uniq = superfast_uniq(frees)
xx_list = []
malloc_size = 1
for i in range(3):
if gri.xx[i] in frees_uniq:
xx_list.append(gri.xx[i])
malloc_size *= gri.Nxx_plus_2NGHOSTS[i]
struct_str += "\tREAL *restrict " + str(freevars_uniq_xx_indep[which_freevar]) + ";\n"
malloc_str += "rfmstruct." + str(
freevars_uniq_xx_indep[which_freevar]) + " = (REAL *)malloc(sizeof(REAL)*" + str(malloc_size) + ");\n"
freemm_str += "free(rfmstruct." + str(freevars_uniq_xx_indep[which_freevar]) + ");\n"
output_define_and_readvr = False
for dirn in range(3):
if (gri.xx[dirn] in frees_uniq) and not (gri.xx[(dirn+1)%3] in frees_uniq) and not (gri.xx[(dirn+2)%3] in frees_uniq):
define_str += "for(int i"+str(dirn)+"=0;i"+str(dirn)+"<Nxx_plus_2NGHOSTS"+str(dirn)+";i"+str(dirn)+"++) {\n"
define_str += " const REAL xx"+str(dirn)+" = xx["+str(dirn)+"][i"+str(dirn)+"];\n"
define_str += " rfmstruct." + str(freevars_uniq_xx_indep[which_freevar]) + "[i"+str(dirn)+"] = " + str(sp.ccode(freevars_uniq_vals[which_freevar])) + ";\n"
define_str += "}\n\n"
readvr_str[dirn] += "const REAL " + str(freevars_uniq_xx_indep[which_freevar]) + " = rfmstruct->" + \
str(freevars_uniq_xx_indep[which_freevar]) + "[i"+str(dirn)+"];\n"
readvr_SIMD_outer_str[dirn] += "const double NOSIMD" + str(
freevars_uniq_xx_indep[which_freevar]) + " = rfmstruct->" + str(freevars_uniq_xx_indep[which_freevar]) + "[i"+str(dirn)+"]; "
readvr_SIMD_outer_str[dirn] += "const REAL_SIMD_ARRAY " + str(freevars_uniq_xx_indep[which_freevar]) + \
" = ConstSIMD(NOSIMD" + str(freevars_uniq_xx_indep[which_freevar]) + ");\n"
readvr_SIMD_inner_str[dirn] += "const REAL_SIMD_ARRAY " + str(freevars_uniq_xx_indep[which_freevar]) + \
" = ReadSIMD(&rfmstruct->" + str(freevars_uniq_xx_indep[which_freevar]) + "[i"+str(dirn)+"]);\n"
output_define_and_readvr = True
if (not output_define_and_readvr) and (gri.xx[0] in frees_uniq) and (gri.xx[1] in frees_uniq):
define_str += """
for(int i1=0;i1<Nxx_plus_2NGHOSTS1;i1++) for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++) {
const REAL xx0 = xx[0][i0];
const REAL xx1 = xx[1][i1];
rfmstruct.""" + str(freevars_uniq_xx_indep[which_freevar]) + """[i0 + Nxx_plus_2NGHOSTS0*i1] = """ + str(sp.ccode(freevars_uniq_vals[which_freevar])) + """;
}\n\n"""
readvr_str[0] += "const REAL " + str(freevars_uniq_xx_indep[which_freevar]) + " = rfmstruct->" + \
str(freevars_uniq_xx_indep[which_freevar]) + "[i0 + Nxx_plus_2NGHOSTS0*i1];\n"
readvr_SIMD_outer_str[0] += "const double NOSIMD" + str(freevars_uniq_xx_indep[which_freevar]) + \
" = rfmstruct->" + str(freevars_uniq_xx_indep[which_freevar]) + "[i0 + Nxx_plus_2NGHOSTS0*i1]; "
readvr_SIMD_outer_str[0] += "const REAL_SIMD_ARRAY " + str(freevars_uniq_xx_indep[which_freevar]) + \
" = ConstSIMD(NOSIMD" + str(freevars_uniq_xx_indep[which_freevar]) + ");\n"
readvr_SIMD_inner_str[0] += "const REAL_SIMD_ARRAY " + str(freevars_uniq_xx_indep[which_freevar]) + \
" = ReadSIMD(&rfmstruct->" + str(freevars_uniq_xx_indep[which_freevar]) + "[i0 + Nxx_plus_2NGHOSTS0*i1]);\n"
output_define_and_readvr = True
if not output_define_and_readvr:
print("ERROR: Could not figure out the (xx0,xx1,xx2) dependency within the expression for "+str(freevars_uniq_xx_indep[which_freevar])+":")
print(str(freevars_uniq_vals[which_freevar]))
sys.exit(1)
which_freevar += 1
struct_str += "} rfm_struct;\n"
# Step 8: Output needed C code to files
outdir = par.parval_from_str(thismodule+"::rfm_precompute_Ccode_outdir")
if par.parval_from_str(thismodule+"::rfm_precompute_to_Cfunctions_and_NRPy_basic_defines") == "False":
with open(os.path.join(outdir, "rfm_struct__declare.h"), "w") as file:
file.write(struct_str)
with open(os.path.join(outdir, "rfm_struct__malloc.h"), "w") as file:
file.write(malloc_str)
with open(os.path.join(outdir, "rfm_struct__define.h"), "w") as file:
file.write(define_str)
with open(os.path.join(outdir, "rfm_struct__freemem.h"), "w") as file:
file.write(freemm_str)
else:
global NRPy_basic_defines_str
NRPy_basic_defines_str = struct_str
global rfm_struct__malloc, rfm_struct__define, rfm_struct__freemem
rfm_struct__malloc = malloc_str
rfm_struct__define = define_str
rfm_struct__freemem = freemm_str
for i in range(3):
with open(os.path.join(outdir, "rfm_struct__read" + str(i) + ".h"), "w") as file:
file.write(readvr_str[i])
with open(os.path.join(outdir, "rfm_struct__SIMD_outer_read" + str(i) + ".h"), "w") as file:
file.write(readvr_SIMD_outer_str[i])
with open(os.path.join(outdir, "rfm_struct__SIMD_inner_read" + str(i) + ".h"), "w") as file:
file.write(readvr_SIMD_inner_str[i])
####################################################
# Core Jacobian (basis) transformation functions,
# for reference metric basis to/from the
# Cartesian basis.
# We define Jacobians relative to the reference metric
# basis at a point x^j_rfm=(xx0,xx1,xx2)_rfm on the source grid:
#
# Jac_dUCart_dDrfmUD[i][j] = dx^i_Cart / dx^j_rfm
#
# via exact differentiation (courtesy SymPy), and the inverse Jacobian
#
# Jac_dUrfm_dDCartUD[i][j] = dx^i_rfm / dx^j_Cart
#
# using NRPy+'s generic_matrix_inverter3x3() function
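#
# Illustrative usage sketch (vU below is a hypothetical rank-1 vector expressed in the reference-metric basis):
#   Jac_dUCart_dDrfmUD, Jac_dUrfm_dDCartUD = compute_Jacobian_and_inverseJacobian_tofrom_Cartesian()
#   vU_Cart = basis_transform_vectorU_from_rfmbasis_to_Cartesian(Jac_dUCart_dDrfmUD, vU)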
def compute_Jacobian_and_inverseJacobian_tofrom_Cartesian():
# Step 2.a: First construct Jacobian matrix:
Jac_dUCart_dDrfmUD = ixp.zerorank2()
for i in range(3):
for j in range(3):
Jac_dUCart_dDrfmUD[i][j] = sp.diff(xx_to_Cart[i], xx[j])
Jac_dUrfm_dDCartUD, dummyDET = ixp.generic_matrix_inverter3x3(Jac_dUCart_dDrfmUD)
return Jac_dUCart_dDrfmUD, Jac_dUrfm_dDCartUD
def basis_transform_vectorU_from_rfmbasis_to_Cartesian(Jac_dUCart_dDrfmUD, src_vectorU):
Cart_dst_vectorU = ixp.zerorank1()
for i in range(3):
for l in range(3):
Cart_dst_vectorU[i] += Jac_dUCart_dDrfmUD[i][l] * src_vectorU[l]
return Cart_dst_vectorU
def basis_transform_vectorD_from_rfmbasis_to_Cartesian(Jac_dUrfm_dDCartUD, src_vectorD):
Cart_dst_vectorD = ixp.zerorank1()
for i in range(3):
for l in range(3):
Cart_dst_vectorD[i] += Jac_dUrfm_dDCartUD[l][i] * src_vectorD[l]
return Cart_dst_vectorD
def basis_transform_tensorDD_from_rfmbasis_to_Cartesian(Jac_dUrfm_dDCartUD, src_tensorDD):
Cart_dst_tensorDD = ixp.zerorank2()
for i in range(3):
for j in range(3):
for l in range(3):
for m in range(3):
Cart_dst_tensorDD[i][j] += Jac_dUrfm_dDCartUD[l][i]*Jac_dUrfm_dDCartUD[m][j]*src_tensorDD[l][m]
return Cart_dst_tensorDD
def basis_transform_vectorU_from_Cartesian_to_rfmbasis(Jac_dUrfm_dDCartUD, Cart_src_vectorU):
rfm_dst_vectorU = ixp.zerorank1()
for i in range(3):
for l in range(3):
rfm_dst_vectorU[i] += Jac_dUrfm_dDCartUD[i][l] * Cart_src_vectorU[l]
return rfm_dst_vectorU
def basis_transform_vectorD_from_Cartesian_to_rfmbasis(Jac_dUCart_dDrfmUD, Cart_src_vectorD):
rfm_dst_vectorD = ixp.zerorank1()
for i in range(3):
for l in range(3):
rfm_dst_vectorD[i] += Jac_dUCart_dDrfmUD[l][i] * Cart_src_vectorD[l]
return rfm_dst_vectorD
def basis_transform_tensorDD_from_Cartesian_to_rfmbasis(Jac_dUCart_dDrfmUD, Cart_src_tensorDD):
rfm_dst_tensorDD = ixp.zerorank2()
for i in range(3):
for j in range(3):
for l in range(3):
for m in range(3):
rfm_dst_tensorDD[i][j] += Jac_dUCart_dDrfmUD[l][i]*Jac_dUCart_dDrfmUD[m][j]*Cart_src_tensorDD[l][m]
return rfm_dst_tensorDD
##################################################
def get_EigenCoord():
CoordSystem_orig = par.parval_from_str("reference_metric::CoordSystem")
for EigenCoordstr in ["Spherical", "Cylindrical", "SymTP", "Cartesian"]:
if EigenCoordstr in CoordSystem_orig:
return EigenCoordstr
print("Error: Could not find EigenCoord for reference_metric::CoordSystem == "+CoordSystem_orig)
sys.exit(1)
# Compute proper distance in all 3 directions. Used to find the appropriate timestep for the CFL condition.
def ds_dirn(delxx, append_gridsuffix_to_xx=False):
gridsuffix = "" # Disable for now
scalefactor_orthog_inj = []
for i in range(3):
if append_gridsuffix_to_xx:
scalefactor_orthog_inj.append(scalefactor_orthog[i].
subs(xx[0], sp.sympify(str(xx[0]) + gridsuffix)).
subs(xx[1], sp.sympify(str(xx[1]) + gridsuffix)).
subs(xx[2], sp.sympify(str(xx[2]) + gridsuffix)))
else:
scalefactor_orthog_inj.append(scalefactor_orthog[i])
ds_dirn = ixp.zerorank1(3)
for i in range(3):
ds_dirn[i] = delxx[i]*scalefactor_orthog_inj[i]
return ds_dirn
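# Worked example (sketch): in Spherical coordinates scalefactor_orthog = (1, xx0, xx0*sin(xx1)),
# so ds_dirn(gri.dxx) returns the proper-distance line elements
#   ds_dirn0 = dxx0,   ds_dirn1 = xx0*dxx1,   ds_dirn2 = xx0*sin(xx1)*dxx2,
# which the CFL routines below minimize over the grid to obtain dt = CFL_FACTOR*dsmin/wavespeed.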
# Find the appropriate timestep for the CFL condition.
def add_to_Cfunction_dict__find_timestep(rel_path_to_Cparams=os.path.join("./"), enable_mask=False,
output_dt_local_h_only=False, use_unit_wavespeed=False):
gridsuffix = "" # Disable for now
##############################
# Step 1: Function description
desc = "Find the CFL-constrained timestep"
##############################
# Step 2: Function return type
c_type = "REAL"
##############################
# Step 3: Function name
name = "find_timestep" + gridsuffix
##############################
# Step 4: Prior to the main loop
preloop = " REAL dsmin = 1e38; // Start with a crazy high value... close to the largest number in single precision."
##############################
# Step 5: Loop options
loopopts = "InteriorPoints,Read_xxs,DisableOpenMP"
if gridsuffix != "":
loopopts += ","+gridsuffix
##############################
# Step 6: function input parameters
params = "const paramstruct *restrict params, REAL *restrict xx[3], const REAL CFL_FACTOR"
if enable_mask:
params += ", int8_t *restrict mask"
##############################
# Step 7: function body
# Compute proper distance in all 3 directions.
if output_dt_local_h_only:
ds_drn = ds_dirn(gri.dxx, append_gridsuffix_to_xx=True)
else:
ds_drn = ds_dirn(gri.dxx, append_gridsuffix_to_xx=False)
ds_dirn_h = outputC([ds_drn[0], ds_drn[1], ds_drn[2]], ["ds_dirn0", "ds_dirn1", "ds_dirn2"], "returnstring").\
replace("dxx0", "dxx0"+gridsuffix).\
replace("dxx1", "dxx1"+gridsuffix).\
replace("dxx2", "dxx2"+gridsuffix)
body = "REAL ds_dirn0, ds_dirn1, ds_dirn2;\n" + ds_dirn_h + """
#ifndef MIN
#define MIN(A, B) ( ((A) < (B)) ? (A) : (B) )
#endif\n"""
indent = ""
if enable_mask:
body += "if(mask[IDX3S" + gridsuffix + "(i0,i1,i2)] >= 0) {\n"
indent = " "
if not output_dt_local_h_only:
# not output_dt_local_h_only -> seeking dsmin over the entire grid, over all directions
body += indent + "// Set dsmin = MIN(dsmin, ds_dirn0, ds_dirn1, ds_dirn2):\n"
body += indent + "dsmin = MIN(dsmin, MIN(ds_dirn0, MIN(ds_dirn1, ds_dirn2)));\n"
else:
# output_dt_local_h_only means we seek a minimum over all directions at given gridpoint only
body += indent + "// Set dt_local["+gridsuffix.replace("_grid", "")+"] = MIN(ds_dirn0, ds_dirn1, ds_dirn2) * CFL_FACTOR/wavespeed :\n"
body += indent + "dt_local["+gridsuffix.replace("_grid", "")+"] = MIN(ds_dirn0, MIN(ds_dirn1, ds_dirn2)) * CFL_FACTOR/wavespeed;\n"
if use_unit_wavespeed:
body = body.replace("wavespeed", "1.0")
if enable_mask:
body += "}\n"
if output_dt_local_h_only:
return body.\
replace("ds_dirn0", "ds_dirn0"+gridsuffix).\
replace("ds_dirn1", "ds_dirn1"+gridsuffix).\
replace("ds_dirn2", "ds_dirn2"+gridsuffix)
##############################
# Step 8: after the loop
postloop = " return dsmin*CFL_FACTOR/wavespeed;\n"
if use_unit_wavespeed:
postloop = postloop.replace("wavespeed", "1.0")
##############################
# Step 9: add to Cfunction dictionary
add_to_Cfunction_dict(
includes=[os.path.join(rel_path_to_Cparams, "NRPy_basic_defines.h")],
desc =desc,
c_type =c_type,
name =name,
params =params,
preloop =preloop,
body =body,
loopopts=loopopts,
postloop=postloop,
rel_path_to_Cparams=rel_path_to_Cparams)
# Find the appropriate timestep for the CFL condition.
def add_to_Cfunc_dict__find_dsmin(rel_path_to_Cparams=os.path.join("./")):
gridsuffix = "" # Disable for now
desc = "Find dsmin = min_i sqrt(ghat_{ii} dx^i dx^i)"
c_type = "REAL"
name = "find_dsmin" + gridsuffix
params = "const paramstruct *restrict params, const int i0i1i2[3], const REAL *restrict xx[3]"
body = """
REAL dsmin = 1e38; // Start with a crazy high value... close to the largest number in single precision.
const REAL xx0 = xx[0][i0i1i2[0]];
const REAL xx1 = xx[1][i0i1i2[1]];
const REAL xx2 = xx[2][i0i1i2[2]];
"""
# Compute proper distance in all 3 directions.
ds_drn = ds_dirn(gri.dxx, append_gridsuffix_to_xx=False)
body += " REAL ds_dirn0, ds_dirn1, ds_dirn2;\n"
body += outputC([ds_drn[0], ds_drn[1], ds_drn[2]], ["ds_dirn0", "ds_dirn1", "ds_dirn2"], "returnstring",
params="outCverbose=false,includebraces=false").\
replace("dxx0", "dxx0"+gridsuffix).\
replace("dxx1", "dxx1"+gridsuffix).\
replace("dxx2", "dxx2"+gridsuffix)
body += """
#ifndef MIN
#define MIN(A, B) ( ((A) < (B)) ? (A) : (B) )
#endif\n"""
indent = ""
body += indent + " // Set dsmin = MIN(dsmin, ds_dirn0, ds_dirn1, ds_dirn2):\n"
body += indent + " return MIN(dsmin, MIN(ds_dirn0, MIN(ds_dirn1, ds_dirn2)));\n"
add_to_Cfunction_dict(
includes=[os.path.join(rel_path_to_Cparams, "NRPy_basic_defines.h")],
desc =desc,
c_type =c_type,
name =name,
params =params,
body =body,
rel_path_to_Cparams=rel_path_to_Cparams)
def out_default_free_parameters_for_rfm(free_parameters_file,
domain_size=1.0,sinh_width=0.4,sinhv2_const_dr=0.05,SymTP_bScale=0.5):
CoordSystem = par.parval_from_str("reference_metric::CoordSystem")
with open(free_parameters_file, "a") as file:
file.write("""
// Set free-parameter values.
const REAL domain_size = """ + str(domain_size) + """;
const REAL sinh_width = """ + str(sinh_width) + """;
const REAL sinhv2_const_dr= """ + str(sinhv2_const_dr) + """;
const REAL SymTP_bScale = """ + str(SymTP_bScale) + ";\n")
coordparams = ""
if CoordSystem == "Spherical":
coordparams += """
params.RMAX = domain_size;\n"""
elif "SinhSpherical" in CoordSystem:
coordparams += """
params.AMPL = domain_size;
params.SINHW= sinh_width;\n"""
if CoordSystem == "SinhSphericalv2":
coordparams += " params.const_dr = sinhv2_const_dr;\n"
elif "SymTP" in CoordSystem:
coordparams += """
params.bScale = SymTP_bScale;
params.AMAX = domain_size;\n"""
if CoordSystem == "SinhSymTP":
coordparams += " params.SINHWAA = sinh_width;\n"
elif CoordSystem == "Cartesian":
coordparams += """
params.xmin = -domain_size, params.xmax = domain_size;
params.ymin = -domain_size, params.ymax = domain_size;
params.zmin = -domain_size, params.zmax = domain_size;\n"""
elif CoordSystem =="SinhCartesian":
coordparams += """
params.AMPLX = domain_size;
params.SINHWX = sinh_width;
params.AMPLY = domain_size;
params.SINHWY = sinh_width;
params.AMPLZ = domain_size;
params.SINHWZ = sinh_width;\n"""
elif CoordSystem == "Cylindrical":
coordparams += """
params.ZMIN = -domain_size;
params.ZMAX = domain_size;
params.RHOMAX = domain_size;\n"""
elif "SinhCylindrical" in CoordSystem:
coordparams += """
params.AMPLRHO = domain_size;
params.SINHWRHO= sinh_width;
params.AMPLZ = domain_size;
params.SINHWZ = sinh_width;\n"""
if CoordSystem == "SinhCylindricalv2":
coordparams += """
params.const_drho = sinhv2_const_dr;
params.const_dz = sinhv2_const_dr;\n"""
file.write(coordparams + "\n")
############################
## TO BE DEPRECATED:
def set_Nxx_dxx_invdx_params__and__xx_h(outdir=".",grid_centering="cell"):
if grid_centering not in ('cell', 'vertex'):
print("rfm.set_Nxx_dxx_invdx_params__and__xx_h(): grid_centering = \""+grid_centering+"\" not supported!")
sys.exit(1)
with open(os.path.join(outdir,"set_Nxx_dxx_invdx_params__and__xx.h"),"w") as file:
file.write(r"""
void set_Nxx_dxx_invdx_params__and__xx(const int EigenCoord, const int Nxx[3],
paramstruct *restrict params, REAL *restrict xx[3]) {
// Override parameter defaults with values based on command line arguments and NGHOSTS.
params->Nxx0 = Nxx[0];
params->Nxx1 = Nxx[1];
params->Nxx2 = Nxx[2];
params->Nxx_plus_2NGHOSTS0 = Nxx[0] + 2*NGHOSTS;
params->Nxx_plus_2NGHOSTS1 = Nxx[1] + 2*NGHOSTS;
params->Nxx_plus_2NGHOSTS2 = Nxx[2] + 2*NGHOSTS;
// Step 0d: Set up space and time coordinates
// Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]:
#include "set_Cparameters.h"
REAL xxmin[3],xxmax[3];
if(EigenCoord == 0) {
""")
for i in range(3):
file.write(" xxmin["+str(i)+"] = "+str(xxmin[i])+";\n")
file.write(" xxmax["+str(i)+"] = "+str(xxmax[i])+";\n")
file.write("""
} else { // if (EigenCoord == 1)
""")
CoordSystem_orig = par.parval_from_str("reference_metric::CoordSystem")
par.set_parval_from_str("reference_metric::CoordSystem",get_EigenCoord())
reference_metric()
for i in range(3):
file.write(" xxmin["+str(i)+"] = "+str(xxmin[i])+";\n")
file.write(" xxmax["+str(i)+"] = "+str(xxmax[i])+";\n")
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem_orig)
reference_metric()
file.write("""
}
params->dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)Nxx[0]);
params->dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)Nxx[1]);
params->dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)Nxx[2]);
params->invdx0 = 1.0/params->dxx0;
params->invdx1 = 1.0/params->dxx1;
params->invdx2 = 1.0/params->dxx2;\n""")
# The following capability was suggested by Terrence Pierre Jacques (Thanks!)
cell_offset = "(1.0/2.0)" # default cell-centered offset
cell_comment = "Cell-centered grid."
if grid_centering == "vertex":
cell_offset = "0.0"
cell_comment = "Vertex-centered grid."
file.write("""
// Now that params.dxx{0,1,2} and params.invdxx{0,1,2} have been set,
// Step 0d.iii: Set up uniform coordinate grids
xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0);
for(int j=0;j<Nxx_plus_2NGHOSTS0;j++)
xx[0][j] = xxmin[0] + ((REAL)(j-NGHOSTS) + """+cell_offset+""")*params->dxx0; // """+cell_comment+"""
xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1);
for(int j=0;j<Nxx_plus_2NGHOSTS1;j++)
xx[1][j] = xxmin[1] + ((REAL)(j-NGHOSTS) + """+cell_offset+""")*params->dxx1; // """+cell_comment+"""
xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2);
for(int j=0;j<Nxx_plus_2NGHOSTS2;j++)
xx[2][j] = xxmin[2] + ((REAL)(j-NGHOSTS) + """+cell_offset+""")*params->dxx2; // """+cell_comment+"""
//fprintf(stderr,"hey inside setxx: %e %e %e | %e %e\\n",xxmin[0],xxmin[1],xxmin[2],xx[0][0],params->dxx0);
}
""")
def xx_to_Cart_h(funcname,cparamsloc,outfile):
# Arbitrary-coordinate NRPy+ file output, Part 1: output the conversion from (x0,x1,x2) to Cartesian (x,y,z)
Cout = outputC([xx_to_Cart[0],xx_to_Cart[1],xx_to_Cart[2]],
["xCart[0]","xCart[1]","xCart[2]"],
"returnstring",params="preindent=1")
with open(outfile, "w") as file:
file.write("""
static inline void """+funcname+"""(const paramstruct *restrict params, REAL *restrict xx[3],const int i0,const int i1,const int i2, REAL xCart[3]) {
#include """+"\""+cparamsloc+"\""+"""
REAL xx0 = xx[0][i0];
REAL xx1 = xx[1][i1];
REAL xx2 = xx[2][i2];\n"""+Cout+"}\n")
#######################
## C FUNCTIONS RELATED TO REFERENCE METRIC
# Construct Cart_to_xx_and_nearest_i0i1i2() C function for
# mapping from Cartesian->xx for the chosen CoordSystem.
def add_to_Cfunc_dict__Cart_to_xx_and_nearest_i0i1i2(rel_path_to_Cparams=os.path.join("./"), relative_to="local_grid_center"):
gridsuffix = "" # Disable for now
CoordSystem = par.parval_from_str("reference_metric::CoordSystem")
prefunc = ""
desc = """Given Cartesian point (x,y,z), this function outputs the corresponding
(xx0,xx1,xx2) and the "closest" (i0,i1,i2) for the given grid"""
namesuffix = ""
if relative_to == "global_grid_center":
namesuffix = "_" + relative_to
name = "Cart_to_xx_and_nearest_i0i1i2" + namesuffix + gridsuffix
params = "const paramstruct *restrict params, const REAL xCart[3], REAL xx[3], int Cart_to_i0i1i2[3]"
preloop = ""
if relative_to == "local_grid_center":
preloop = """
// First compute the closest (xx0,xx1,xx2) to the given Cartesian gridpoint (x,y,z),
// *relative* to the center of the local grid.
// So for example,
// 1) if global xCart[012] = (1,1,1), and the
// origin of the grid is at global xCart (x,y,z) = (1,1,1), then
// (Cartx,Carty,Cartz) = (0,0,0)
// 2) if global xCart[012] = (0,0,0), and the
// origin of the grid is at global xCart (x,y,z) = (1,1,1), then
// (Cartx,Carty,Cartz) = (-1,-1,-1)
// Therefore, (Cartx,Carty,Cartz) = (xCart[0]-originx, xCart[1]-originy, xCart[2]-originz)
const REAL Cartx = xCart[0] - Cart_originx_GRIDSFX_;
const REAL Carty = xCart[1] - Cart_originy_GRIDSFX_;
const REAL Cartz = xCart[2] - Cart_originz_GRIDSFX_;
""".replace("_GRIDSFX_", gridsuffix)
elif relative_to == "global_grid_center":
preloop = """
const REAL Cartx = xCart[0];
const REAL Carty = xCart[1];
const REAL Cartz = xCart[2];
"""
else:
print("Error: relative_to must be set to either local_grid_center or global_grid_center. " + relative_to + " was chosen.")
sys.exit(1)
if "theta_adj" in CoordSystem:
body = outputC([Cart_to_xx[0], Cart_to_xx[1], Cart_to_xx[2]],
["xx[0]", "const REAL target_th", "xx[2]"], "returnstring", params="includebraces=False,preindent=1")
body += " xx[1] = NewtonRaphson_get_xx1_from_th(params, target_th);\n"
else:
body = outputC([Cart_to_xx[0], Cart_to_xx[1], Cart_to_xx[2]],
["xx[0]", "xx[1]", "xx[2]"], "returnstring", params="includebraces=False,preindent=1")
body += """
// Then find the nearest index (i0,i1,i2) on underlying grid to (x,y,z)
// Recall that:
// xx[0][j] = xxmin[0] + ((REAL)(j-NGHOSTS) + (1.0/2.0))*params->dxx0"""+gridsuffix+"""; // Cell-centered grid.
// --> j = (int) ( (xx[0][j] - xxmin[0]) / params->dxx0"""+gridsuffix+""" + (1.0/2.0) + NGHOSTS )
Cart_to_i0i1i2[0] = (int)( ( xx[0] - ("""+str(xxmin[0])+""") ) / params->dxx0"""+gridsuffix+""" + (1.0/2.0) + NGHOSTS - 0.5 ); // Account for (int) typecast rounding down
Cart_to_i0i1i2[1] = (int)( ( xx[1] - ("""+str(xxmin[1])+""") ) / params->dxx1"""+gridsuffix+""" + (1.0/2.0) + NGHOSTS - 0.5 ); // Account for (int) typecast rounding down
Cart_to_i0i1i2[2] = (int)( ( xx[2] - ("""+str(xxmin[2])+""") ) / params->dxx2"""+gridsuffix+""" + (1.0/2.0) + NGHOSTS - 0.5 ); // Account for (int) typecast rounding down
"""
add_to_Cfunction_dict(
includes=[os.path.join(rel_path_to_Cparams, "NRPy_basic_defines.h")],
prefunc=prefunc,
desc =desc,
c_type ="void",
name =name,
params =params,
preloop=preloop,
body =body,
rel_path_to_Cparams=rel_path_to_Cparams)
def add_to_Cfunc_dict_set_Nxx_dxx_invdx_params__and__xx(rel_path_to_Cparams=os.path.join("./"), NGHOSTS_is_a_param=False):
gridsuffix = "" # Disable for now.
def set_xxmin_xxmax():
outstr = ""
for dirn in range(3):
outstr += " xxmin[" + str(dirn) + "] = " + str(xxmin[dirn]) + ";\n"
outstr += " xxmax[" + str(dirn) + "] = " + str(xxmax[dirn]) + ";\n"
return outstr
body = """
// Override parameter defaults with values based on command line arguments and NGHOSTS.
params->Nxx0""" + gridsuffix + r""" = Nxx[0];
params->Nxx1""" + gridsuffix + r""" = Nxx[1];
params->Nxx2""" + gridsuffix + r""" = Nxx[2];
"""
NGHOSTS_prefix=""
if NGHOSTS_is_a_param:
NGHOSTS_prefix="params->"
body += """
params->Nxx_plus_2NGHOSTS0""" + gridsuffix + """ = Nxx[0] + 2*"""+NGHOSTS_prefix+"""NGHOSTS;
params->Nxx_plus_2NGHOSTS1""" + gridsuffix + """ = Nxx[1] + 2*"""+NGHOSTS_prefix+"""NGHOSTS;
params->Nxx_plus_2NGHOSTS2""" + gridsuffix + """ = Nxx[2] + 2*"""+NGHOSTS_prefix+"""NGHOSTS;
// Now that params->Nxx_plus_2NGHOSTS* has been set, and below we need e.g., Nxx_plus_2NGHOSTS*, we include set_Cparameters.h here:
#include \"""" + os.path.join(rel_path_to_Cparams, "set_Cparameters.h") + """\"
// Step 0d: Set up space and time coordinates
// Step 0d.i: Declare Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]:
REAL xxmin[3],xxmax[3];
if(EigenCoord == 0) {
""" + set_xxmin_xxmax() + """ } else { // if (EigenCoord == 1)
"""
CoordSystem_orig = par.parval_from_str("reference_metric::CoordSystem")
# If we are using a "holey" Spherical-like coordinate, for certain grids xx0min>0 is
# such that xx[0][0] is negative, which causes "Cartesian disagreement" errors.
if "Spherical" not in CoordSystem_orig:
par.set_parval_from_str("reference_metric::CoordSystem", get_EigenCoord())
reference_metric()
body += set_xxmin_xxmax()
par.set_parval_from_str("reference_metric::CoordSystem", CoordSystem_orig)
reference_metric()
else:
body += set_xxmin_xxmax()
# Now set grid spacing dxx, invdx = 1/dxx, and xx[]
body += """ }
// Step 0d.iii: Set params.dxx{0,1,2}, params.invdx{0,1,2}, and uniform coordinate grids xx[3][]
"""
for dirn in ["0", "1", "2"]:
body += " params->dxx"+dirn+gridsuffix+" = (xxmax["+dirn+"] - xxmin["+dirn+"]) / ((REAL)Nxx["+dirn+"]);\n"
body += " params->invdx"+dirn+gridsuffix+" = 1.0/params->dxx"+dirn+gridsuffix+";\n"
body += """ xx["""+dirn+"""] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS"""+dirn+gridsuffix + """);
for(int j=0;j<Nxx_plus_2NGHOSTS"""+dirn+gridsuffix+""";j++)
xx["""+dirn+"""][j] = xxmin["""+dirn+"""] + ((REAL)(j-NGHOSTS) + (1.0/2.0))*params->dxx"""+dirn+gridsuffix+"""; // Cell-centered grid.\n"""
if dirn != "2":
body += "\n"
add_to_Cfunction_dict(
includes=[os.path.join(rel_path_to_Cparams, "NRPy_basic_defines.h")],
desc ="Override default values for Nxx{0,1,2}, Nxx_plus_2NGHOSTS{0,1,2}, dxx{0,1,2}, and invdx{0,1,2}; and set xx[3][]",
c_type="void",
name ="set_Nxx_dxx_invdx_params__and__xx"+gridsuffix,
params="const int EigenCoord, const int Nxx[3],paramstruct *restrict params, REAL *restrict xx[3]",
body =body,
enableCparameters=False, uses_rfm=True) # Cparameters here must be #include'd in body, not at top of function as usual.
def add_to_Cfunc_dict_xx_to_Cart(rel_path_to_Cparams=os.path.join("./")):
gridsuffix = "" # Disable for now
# Arbitrary-coordinate NRPy+ file output, Part 1: output the conversion from (x0,x1,x2) to Cartesian (x,y,z)
# Suppose grid origin is at 1,1,1. Then the Cartesian gridpoint at 1,2,3 will be 2,3,4; hence
# the xx_to_Cart[i]+gri.Cart_origin[i] below:
body = """
REAL xx0 = xx[0][i0];
REAL xx1 = xx[1][i1];
REAL xx2 = xx[2][i2];
""" + outputC([xx_to_Cart[0]+gri.Cart_origin[0],
xx_to_Cart[1]+gri.Cart_origin[1],
xx_to_Cart[2]+gri.Cart_origin[2]],
["xCart[0]", "xCart[1]", "xCart[2]"],
"returnstring", params="preindent=1"). \
replace("Cart_originx", "Cart_originx" + gridsuffix).\
replace("Cart_originy", "Cart_originy" + gridsuffix).\
replace("Cart_originz", "Cart_originz" + gridsuffix)
add_to_Cfunction_dict(
includes=[os.path.join(rel_path_to_Cparams, "NRPy_basic_defines.h")],
        desc   ="Compute Cartesian coordinates given local grid coordinate (xx0,xx1,xx2), "
                "accounting for the origin of this grid possibly being off-center.",
c_type ="void",
name ="xx_to_Cart"+gridsuffix,
params ="const paramstruct *restrict params, REAL *restrict xx[3],const int i0,const int i1,const int i2, REAL xCart[3]",
body =body,
rel_path_to_Cparams=rel_path_to_Cparams)
# Find the appropriate timestep for the CFL condition.
def add_to_Cfunction_dict_find_timestep():
# Compute proper distance in all 3 directions.
delxx = ixp.declarerank1("dxx", DIM=3)
ds_drn = ds_dirn(delxx)
ds_dirn_h = outputC([ds_drn[0], ds_drn[1], ds_drn[2]], ["ds_dirn0", "ds_dirn1", "ds_dirn2"],"returnstring")
desc="Find the CFL-constrained timestep"
add_to_Cfunction_dict(
desc =desc,
c_type ="REAL",
name ="find_timestep",
params ="const paramstruct *restrict params, REAL *restrict xx[3]",
preloop ="REAL dsmin = 1e38; // Start with a crazy high value... close to the largest number in single precision.",
body ="REAL ds_dirn0, ds_dirn1, ds_dirn2;\n"+ds_dirn_h+"""
#ifndef MIN
#define MIN(A, B) ( ((A) < (B)) ? (A) : (B) )
#endif
// Set dsmin = MIN(dsmin, ds_dirn0, ds_dirn1, ds_dirn2);
dsmin = MIN(dsmin,MIN(ds_dirn0,MIN(ds_dirn1,ds_dirn2)));
""",
loopopts ="InteriorPoints,Read_xxs,DisableOpenMP",
postloop ="return dsmin*CFL_FACTOR/wavespeed;\n")
def out_timestep_func_to_file(outfile):
add_to_Cfunction_dict_find_timestep()
with open(outfile, "w") as file:
file.write(outC_function_dict["find_timestep"])
def register_C_functions_and_NRPy_basic_defines(rel_path_to_Cparams=os.path.join("./"), enable_rfm_precompute=False,
use_unit_wavespeed_for_find_timestep=False):
add_to_Cfunction_dict__find_timestep(rel_path_to_Cparams=rel_path_to_Cparams, enable_mask=False,
output_dt_local_h_only=False,
use_unit_wavespeed=use_unit_wavespeed_for_find_timestep)
add_to_Cfunc_dict_xx_to_Cart(rel_path_to_Cparams=rel_path_to_Cparams)
add_to_Cfunc_dict_set_Nxx_dxx_invdx_params__and__xx(rel_path_to_Cparams=rel_path_to_Cparams)
for frame in "local", "global":
add_to_Cfunc_dict__Cart_to_xx_and_nearest_i0i1i2(rel_path_to_Cparams=rel_path_to_Cparams,
relative_to=frame + "_grid_center")
if enable_rfm_precompute:
if par.parval_from_str(thismodule+"::rfm_precompute_to_Cfunctions_and_NRPy_basic_defines") == "True":
# global rfm_struct__malloc, rfm_struct__define, rfm_struct__freemem
add_to_Cfunction_dict(
includes=[os.path.join(rel_path_to_Cparams, "NRPy_basic_defines.h")],
desc="Reference Metric Precomputation infrastructure: Allocate memory for rfmstruct",
c_type="void",
name="rfm_precompute_rfmstruct_malloc",
params="const paramstruct *restrict params, rfm_struct *restrict rfmstruct",
body=indent_Ccode(rfm_struct__malloc.replace("rfmstruct.", "rfmstruct->")),
rel_path_to_Cparams=rel_path_to_Cparams)
add_to_Cfunction_dict(
includes=[os.path.join(rel_path_to_Cparams, "NRPy_basic_defines.h")],
desc="Reference Metric Precomputation infrastructure: Define rfmstruct",
c_type="void",
name="rfm_precompute_rfmstruct_define",
params="const paramstruct *restrict params, REAL *restrict xx[3], rfm_struct *restrict rfmstruct",
body=indent_Ccode(rfm_struct__define.replace("rfmstruct.", "rfmstruct->")),
rel_path_to_Cparams=rel_path_to_Cparams)
add_to_Cfunction_dict(
includes=[os.path.join(rel_path_to_Cparams, "NRPy_basic_defines.h")],
desc="Reference Metric Precomputation infrastructure: Free rfmstruct memory",
c_type="void",
name="rfm_precompute_rfmstruct_freemem",
params="const paramstruct *restrict params, rfm_struct *restrict rfmstruct",
body=indent_Ccode(rfm_struct__freemem.replace("rfmstruct.", "rfmstruct->")),
rel_path_to_Cparams=rel_path_to_Cparams)
outC_NRPy_basic_defines_h_dict["reference_metric"] = NRPy_basic_defines_str
else:
outC_NRPy_basic_defines_h_dict["reference_metric"] = """#include "rfm_files/rfm_struct__declare.h"\n"""
| 50.764476 | 176 | 0.590695 | [
"BSD-2-Clause"
] | Harmohit-Singh/nrpytutorial | reference_metric.py | 78,025 | Python |
if __name__ == "__main__":
assert currentProgram is None
assert state.getProject() is not None
print("programless_script executed successfully")
| 26.333333 | 53 | 0.740506 | [
"MIT"
] | Defense-Cyber-Crime-Center/pyhidra | tests/programless_script.py | 158 | Python |
from ..config import config
import logging
import os
import shutil
import subprocess
from subprocess import CalledProcessError, Popen
from time import sleep
logging.basicConfig(
level=config.log_level, format='%(asctime)s | %(levelname)s | %(message)s')
PORT = config.port
def rm_gen_dir():
try:
shutil.rmtree('generated-tests')
    except FileNotFoundError:
pass
def deps_installed():
return shutil.which('oatts') is not None and shutil.which('mocha') is not None
def run_oatts():
logging.info('Running oatts tests...')
if not deps_installed():
logging.error('oatts is not installed! See the README.')
exit(0)
rm_gen_dir()
try:
subprocess.run(['oatts', 'generate',
'-w', 'generated-tests',
'-s', 'swagger/api.spec.yaml',
'--host', 'localhost:{}'.format(PORT),
'--customValuesFile', 'test/values.json'], check=True)
subprocess.run(
['mocha', '--recursive', 'generated-tests'], check=True)
    except CalledProcessError:
logging.error('oatts tests failed!')
finally:
rm_gen_dir()
server_process = None
def start_server():
global server_process
env = os.environ.copy()
env['INSIGHTS_CONNEXION_ENV'] = 'test'
server_process = Popen(['pipenv', 'run', 'server'], env=env)
sleep(5)
def test():
try:
logging.info('Testing...')
start_server()
run_oatts()
logging.info('Testing is done')
    except Exception as err:
logging.error(err)
finally:
server_process.terminate()
| 23.628571 | 82 | 0.602177 | [
"MIT"
] | fijshion/insights_connexion | insights_connexion/test/oatts.py | 1,654 | Python |
import os
import numpy as np
import random
from nn_activations import sigmoid, sigmoid_prime
class NeuralNetwork(object):
def __init__(self, sizes=list(), learning_rate=1.0, mini_batch_size=16,
epochs=10):
"""Initialize a Neural Network model.
Parameters
----------
sizes : list, optional
            A list of integers specifying the number of neurons in each layer. Not
required if a pretrained model is used.
learning_rate : float, optional
Learning rate for gradient descent optimization. Defaults to 1.0
mini_batch_size : int, optional
Size of each mini batch of training examples as used by Stochastic
Gradient Descent. Denotes after how many examples the weights
and biases would be updated. Default size is 16.
"""
# Input layer is layer 0, followed by hidden layers layer 1, 2, 3...
self.sizes = sizes
self.num_layers = len(sizes)
# First term corresponds to layer 0 (input layer). No weights enter the
# input layer and hence self.weights[0] is redundant.
self.weights = [np.array([0])] + [np.random.randn(y, x) for y, x in
zip(sizes[1:], sizes[:-1])]
# Input layer does not have any biases. self.biases[0] is redundant.
self.biases = [np.random.randn(y, 1) for y in sizes]
# Input layer has no weights, biases associated. Hence z = wx + b is not
# defined for input layer. self.zs[0] is redundant.
self._zs = [np.zeros(bias.shape) for bias in self.biases]
# Training examples can be treated as activations coming out of input
# layer. Hence self.activations[0] = (training_example).
self._activations = [np.zeros(bias.shape) for bias in self.biases]
self.mini_batch_size = mini_batch_size
self.epochs = epochs
self.eta = learning_rate
def fit(self, training_data, validation_data=None):
"""Fit (train) the Neural Network on provided training data. Fitting is
        carried out using the Stochastic Gradient Descent algorithm.
Parameters
----------
training_data : list of tuple
A list of tuples of numpy arrays, ordered as (image, label).
validation_data : list of tuple, optional
            Same as `training_data`. If provided, the network will display
validation accuracy after each epoch.
"""
for epoch in range(self.epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k + self.mini_batch_size] for k in
range(0, len(training_data), self.mini_batch_size)]
for mini_batch in mini_batches:
nabla_b = [np.zeros(bias.shape) for bias in self.biases]
nabla_w = [np.zeros(weight.shape) for weight in self.weights]
for x, y in mini_batch:
self._forward_prop(x)
delta_nabla_b, delta_nabla_w = self._back_prop(x, y)
nabla_b = [nb + dnb for nb,
dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw + dnw for nw,
dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [
w - (self.eta / self.mini_batch_size) * dw for w, dw in
zip(self.weights, nabla_w)]
self.biases = [
b - (self.eta / self.mini_batch_size) * db for b, db in
zip(self.biases, nabla_b)]
if validation_data:
                accuracy = self.validate(validation_data) / 100.0  # correct count / 100 -> percent, assuming 10,000 validation examples
print("Epoch {0}, accuracy {1} %.".format(epoch + 1, accuracy))
else:
print("Processed epoch {0}.".format(epoch))
def validate(self, validation_data):
"""Validate the Neural Network on provided validation data. It uses the
number of correctly predicted examples as validation accuracy metric.
Parameters
----------
validation_data : list of tuple
Returns
-------
int
Number of correctly predicted images.
"""
validation_results = [(self.predict(x) == y)
for x, y in validation_data]
return sum(result for result in validation_results)
def predict(self, x):
"""Predict the label of a single test example (image).
Parameters
----------
x : numpy.array
Returns
-------
int
Predicted label of example (image).
"""
self._forward_prop(x)
return np.argmax(self._activations[-1])
def _forward_prop(self, x):
self._activations[0] = x
for i in range(1, self.num_layers):
self._zs[i] = (
self.weights[i].dot(self._activations[i - 1]) + self.biases[i]
)
self._activations[i] = sigmoid(self._zs[i])
def _back_prop(self, x, y):
nabla_b = [np.zeros(bias.shape) for bias in self.biases]
nabla_w = [np.zeros(weight.shape) for weight in self.weights]
error = (self._activations[-1] - y) * sigmoid_prime(self._zs[-1])
nabla_b[-1] = error
nabla_w[-1] = error.dot(self._activations[-2].transpose())
for l in range(self.num_layers - 2, 0, -1):
error = np.multiply(
self.weights[l + 1].transpose().dot(error),
sigmoid_prime(self._zs[l])
)
nabla_b[l] = error
nabla_w[l] = error.dot(self._activations[l - 1].transpose())
return nabla_b, nabla_w
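    # The backpropagation in _back_prop() follows the standard equations (sketch, in the
    # notation of the code above): the output-layer error is
    #     error = (activations[-1] - y) * sigmoid'(zs[-1]),
    # and it is propagated backwards through each hidden layer l via
    #     error = (weights[l+1].T @ error) * sigmoid'(zs[l]),
    # yielding gradients nabla_b[l] = error and nabla_w[l] = error @ activations[l-1].T.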
def load(self, filename='model.npz'):
"""Prepare a neural network from a compressed binary containing weights
        and biases arrays. Layer sizes are derived from the dimensions of the
numpy arrays.
Parameters
----------
filename : str, optional
Name of the ``.npz`` compressed binary in models directory.
"""
npz_members = np.load(os.path.join(os.curdir, 'models', filename))
self.weights = list(npz_members['weights'])
self.biases = list(npz_members['biases'])
# Bias vectors of each layer has same length as the number of neurons
# in that layer. So we can build `sizes` through biases vectors.
self.sizes = [b.shape[0] for b in self.biases]
self.num_layers = len(self.sizes)
# These are declared as per desired shape.
self._zs = [np.zeros(bias.shape) for bias in self.biases]
self._activations = [np.zeros(bias.shape) for bias in self.biases]
# Other hyperparameters are set as specified in model. These were cast
# to numpy arrays for saving in the compressed binary.
self.mini_batch_size = int(npz_members['mini_batch_size'])
self.epochs = int(npz_members['epochs'])
self.eta = float(npz_members['eta'])
def save(self, filename='model.npz'):
"""Save weights, biases and hyperparameters of neural network to a
compressed binary. This ``.npz`` binary is saved in 'models' directory.
Parameters
----------
filename : str, optional
            Name of the ``.npz`` compressed binary to be saved.
"""
np.savez_compressed(
file=os.path.join(os.curdir, 'models', filename),
weights=self.weights,
biases=self.biases,
mini_batch_size=self.mini_batch_size,
epochs=self.epochs,
eta=self.eta
)
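# Minimal smoke-test sketch (not part of the original API): the toy data below is random
# and purely illustrative; it assumes numpy and the local nn_activations module are available.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    # Ten fake 4-dimensional "images" with one-hot labels over 3 classes.
    toy_train = [(rng.rand(4, 1), np.eye(3)[:, [rng.randint(3)]]) for _ in range(10)]
    toy_val = [(rng.rand(4, 1), rng.randint(3)) for _ in range(5)]
    net = NeuralNetwork(sizes=[4, 8, 3], learning_rate=1.0, mini_batch_size=2, epochs=2)
    net.fit(toy_train, validation_data=toy_val)
    print("Predicted label:", net.predict(rng.rand(4, 1)))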
| 37.164251 | 80 | 0.579488 | [
"MIT"
] | fredwangwang/webcam-sudoku-solver | src/nn_model.py | 7,693 | Python |
import numpy as np
import logging
import os, errno
from datetime import datetime
from ..abstract import Environment
from maddux.environment import Environment  # NOTE: shadows the abstract Environment imported above
from maddux.objects import Ball
from maddux.robots import simple_human_arm
class RobotArm(Environment):
def __init__(self, env, training_directory, config):
self.discrete = False
self.training_directory = training_directory
self.config = config
self.env = env
self.arm = self.env.robot
self.current = self.arm.end_effector_position()
self.ball = self.env.dynamic_objects[0]
self.target = self.ball.position
self.static_objects = self.env.static_objects
# self.recording_queue = []
logging.info("Robot Arm: End effector starts at {}".format(self.current))
logging.info("Target: Ball at {}".format(self.target))
def reset(self):
"""
Reset current position to beginning.
"""
self.arm.reset()
self.current = self.arm.end_effector_position()
def act(self, location, population, params, master):
"""
Move end effector to the given location
"""
valid = True
past = self.current
self.current = location
if population % self.config['record_iterations'] == 0 and master:
print("Recording")
try:
self.arm.ikine(location)
timestamp = datetime.now().strftime("%m-%d-%Y_%H-%M-%S")
training_path = self.training_directory + "/records/"
try:
os.makedirs(training_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
record_path = training_path + "pop_" + str(population) + ".npy"
video_path = training_path + "pop_" + str(population) + ".mp4"
self.arm.save_path(record_path)
self.env.animate(duration=5.0, save_path=video_path)
np.save(training_path + "net_" + str(population), params)
# self.recording_queue.append(record_path)
except ValueError as e:
valid = False
                logging.warning("Could not solve IK for position: {}".format(location[0]))
# logging.info("Current Position: {}".format(self.current))
return valid
def inputs(self, t):
"""
Return the inputs for the neural network
"""
inputs = [self.current[0], self.current[1], self.current[2], self.target[0], self.target[1], self.target[2], t+1]
return inputs
def reward_params(self, valid):
"""
Return the parameters for the proposed reward function
"""
# params = [(self.current, self.target), (self.current, self.static_objects)]
params = (self.current, self.target)
return params
def pre_processing(self):
"""
        Complete any pending pre-processing tasks
"""
pass
def post_processing(self):
"""
Complete any pending post processing tasks
"""
# logging.debug("Recording Videos")
# for path in self.recording_queue:
# self.env.animate(duration=5.0, save_path=path)
# logging.debug("Completed recording all videos")
pass
def reached_target(self):
return np.linalg.norm(self.current - self.target) < 3
| 30.302083 | 115 | 0.702991 | [
"MIT"
] | callaunchpad/MOR | environments/robot_arm/robot_arm.py | 2,909 | Python |
"""
This module enables the clustering of DataFrame headers into
like clusters based on correlations between columns
"""
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy as sch
from gretel_synthetics.utils import stats
LEFT = 0
RIGHT = 1
def _get_correlation_matrix(df, numeric_cat: List[str] = None):
if numeric_cat is None:
numeric_cat = []
nominal_columns = list(df.select_dtypes(include=["object", "category"]).columns)
nominal_columns.extend(x for x in numeric_cat if x not in nominal_columns)
corr_matrix = stats.calculate_correlation(df, nominal_columns=nominal_columns)
return corr_matrix
def _get_leaves(tree, node, totcolcnt):
return_list = []
stack = []
curr = node
def _walk(node: int, side: int, save=False):
# If it's a leaf, return a list with this leaf
if node < totcolcnt:
if save:
return_list.append(node)
return None
# else perculate
else:
node = int(node - totcolcnt)
child = int(tree[node][side])
return child
while True:
if curr is not None:
stack.append(curr)
curr = _walk(curr, LEFT)
elif stack:
curr = stack.pop()
curr = _walk(curr, RIGHT, save=True)
else:
break
return return_list
def _traverse_node(tree, node, maxsize, totcolcnt):
stack = []
node_list = []
curr = node
def _walk(node: int, side: int):
child = int(tree[node][side])
child_size = 1
idx = 0
if child > totcolcnt:
idx = child - totcolcnt
child_size = tree[idx][3]
if child_size > maxsize:
return idx
else:
node_list.append(_get_leaves(tree, child, totcolcnt))
return None
while True:
if curr is not None:
stack.append(curr)
curr = _walk(curr, LEFT)
elif stack:
curr = stack.pop()
curr = _walk(curr, RIGHT)
else:
break
return node_list
def _merge_clusters(
clusters: List[List[int]], maxlen: int, columns: List[str], Lopt, plot=False
) -> List[List[str]]:
out = []
tmp = []
cluster_map = {} # maps a column name => cluster number
cluster_number = 0
for cluster in clusters:
# if the size of adding the next cluster
# exceeds the max size, flush
if len(tmp) + len(cluster) > maxlen:
for column_name in tmp:
cluster_map[column_name] = cluster_number
out.append(tmp)
tmp = []
cluster_number += 1
tmp.extend(
[columns[idx] for idx in cluster]
) # build the tmp with the actual column names
# attach the final cluster
if tmp:
cluster_number += 1
out.append(tmp)
for column_name in tmp:
cluster_map[column_name] = cluster_number
if plot:
labels = [x + "(" + str(cluster_map[x]) + ")" for x in columns]
plt.figure(figsize=(25, 8))
plt.title("Field Header Correlation Cluster Hierarchy")
sch.dendrogram(Lopt, labels=labels, leaf_rotation=90.0)
return out
def cluster(
df: pd.DataFrame,
header_prefix: List[str] = None,
maxsize: int = 20,
method: str = "single",
numeric_cat: List[str] = None,
plot=False,
) -> List[List[str]]:
"""
Given an input dataframe, extract clusters of similar headers
based on a set of heuristics.
Args:
df: The dataframe to cluster headers from.
header_prefix: List of columns to remove before cluster generation.
        maxsize: The maximum number of headers to include in each
            generated cluster.
method: Linkage method used to compute header cluster
distances. For more information please refer to the scipy
docs, https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html#scipy-cluster-hierarchy-linkage.
numeric_cat: A list of fields to define as categorical. The header
clustering code will automatically define pandas "object" and
"category" columns as categorical. The ``numeric_cat`` parameter
may be used to define additional categorical fields that may
not automatically get identified as such.
        plot: Plot header list as a dendrogram.
"""
def prepare_response(
col_list: List[List[str]], prefix: List[str] = None
) -> List[List[str]]:
if prefix is not None:
col_list[0] = prefix + col_list[0]
return col_list
if numeric_cat is None:
numeric_cat = []
if header_prefix is not None:
try:
df = df.drop(header_prefix, axis=1)
except KeyError as err:
raise ValueError("Header prefixes do not all exist in source DF") from err
# Bug(jm): if the number of columns left in the DF is just one
# we just return that single column
if df.shape[1] == 1:
return prepare_response([list(df.columns)], header_prefix)
# Start by getting the correlation matrix
corr_matrix = _get_correlation_matrix(df, numeric_cat)
# Change it into a distance matrix
X = 1 - np.array(1 - abs(corr_matrix))
# Cluster the columns
L = sch.linkage(X, method=method)
# Optimize the leaf ordering to minimize the distance between adjacent leaves
Lopt = sch.optimal_leaf_ordering(L, X)
columns = df.columns
start = len(Lopt) - 1
    # Start at the top of the cluster hierarchy with the final two clusters that were merged together
# We will recursively work our way down, fetching the subclusters of a cluster if the current
# cluster size > maxsize
clusters = _traverse_node(Lopt, start, maxsize, len(columns))
    # At this point we have a list of column-index groups; translate it into a list of
    # header-name lists, and if plot=True, plot the dendrogram
col_list = _merge_clusters(clusters, maxsize, columns, Lopt, plot)
return prepare_response(col_list, header_prefix)
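# Quick usage sketch (hypothetical data, not from the library's docs); assumes pandas,
# numpy, and this package's stats utilities are importable, as at the top of this module.
if __name__ == "__main__":
    demo_df = pd.DataFrame(
        {
            "age": np.random.randint(18, 90, 100),
            "height_cm": np.random.normal(170.0, 10.0, 100),
            "city": np.random.choice(["NYC", "SF", "LA"], 100),
            "state": np.random.choice(["NY", "CA"], 100),
        }
    )
    # Returns a list of header clusters, e.g. [["age", "height_cm"], ["city", "state"]].
    print(cluster(demo_df, maxsize=2))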
| 30.398058 | 140 | 0.626956 | [
"Apache-2.0"
] | andrewnc/gretel-synthetics | src/gretel_synthetics/utils/header_clusters.py | 6,262 | Python |
"""
Plugin architecture, based on decorators
References:
1. https://play.pixelblaster.ro/blog/2017/12/18/a-quick-and-dirty-mini-plugin-system-for-python/
"""
| 23.285714 | 100 | 0.736196 | [
"MIT"
] | huuhoa/colusa | src/colusa/plugins/__init__.py | 163 | Python |
# Generated by Django 2.2.10 on 2020-03-20 15:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import src.auth.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
('first_name', models.CharField(blank=True, max_length=40, null=True, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=40, null=True, verbose_name='last name')),
('display_name', models.CharField(blank=True, max_length=14, null=True, verbose_name='display name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'db_table': 'auth_user',
'abstract': False,
},
managers=[
('objects', src.auth.models.MyUserManager()),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='profile', serialize=False, to=settings.AUTH_USER_MODEL, verbose_name='user')),
('avatar_url', models.CharField(blank=True, max_length=256, null=True)),
('dob', models.DateField(blank=True, null=True, verbose_name='dob')),
],
options={
'db_table': 'user_profile',
},
),
]
| 56.741379 | 266 | 0.637496 | [
"BSD-3-Clause"
] | SerhatTeker/django-bank-allauth-rest | src/auth/migrations/0001_initial.py | 3,291 | Python |
import os
# system parameters
GPUS = [0]
DATALOADER_WORKERS = 8
# optimization parameters
BATCH_SIZE = 1
EPOCHS = 50
LR = 0.0001
WEIGHT_DECAY = 0.0005
MOMENTUM = 0.9
# image pre-processing parameters
GAUSSIAN_VALUE = 0
# directory locations
HOME_DIR = "/home/mbc2004"
DATASET_DIR = "/home/mbc2004/datasets"
MODEL_SRC_DIR = "/home/mbc2004/models"
BASE_MODEL_DIR = "base_models"
MODEL_SAVE_DIR = "saved_models"
# input parameters
INPUT_FRAMES = 64 # 16
application_list = ['block_construction_timed', 'block_construction', 'ikea', 'crepe_action', 'crepe_recipe']
def default_model_params():
class Params:
def __init__(self,
gpus=GPUS,
dataloader_workers=DATALOADER_WORKERS,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
lr=LR,
weight_decay=WEIGHT_DECAY,
momentum=MOMENTUM,
gaussian_value=GAUSSIAN_VALUE,
home_dir=HOME_DIR,
model_save_dir=MODEL_SAVE_DIR,
base_model_dir=BASE_MODEL_DIR,
input_frames=INPUT_FRAMES
):
self.gpus = gpus
self.dataloader_workers = dataloader_workers
self.batch_size = batch_size
self.epochs = epochs # number of epochs to run experiments for
self.lr = lr
            self.weight_decay = weight_decay  # optimizer weight decay (L2 regularization strength)
            self.momentum = momentum  # optimizer momentum (e.g., for SGD)
self.gaussian_value = gaussian_value
self.home_dir = home_dir
self.base_model_dir = base_model_dir
self.model_save_dir = model_save_dir
self.input_frames = input_frames
self.model = "unassigned"
self.application = "unassigned"
class ApplicationDef:
def __init__(self, app):
self.app = app
self.masking = True
if app == "block_construction":
self.file_directory = os.path.join(DATASET_DIR, "BlockConstruction")
self.trace_file = os.path.join(self.file_directory, "traces6.npy")
self.obs_label_list = {"n": 0, "r": 1, "rr": 2, "rrr": 3, "g": 4, "gb": 5, "bg": 6, "b": 7}
self.act_label_list = {"N": 0, "R": 1, "G": 2, "B": 3}
# models
self.tsm = {"filename": "c_backbone_tsm_1_bn16", "bottleneck": 16}
self.wrn = {"filename": "c_backbone_wrn_2_bn16", "bottleneck": 16}
self.i3d = {"filename": "c_backbone_i3d_1_bn8", "bottleneck": 8}
self.vgg = {"filename": "c_backbone_vgg_2_bn32", "bottleneck": 32}
elif app == "block_construction_timed":
self.file_directory = os.path.join(DATASET_DIR, "BlockConstructionTimed")
self.trace_file = os.path.join(self.file_directory, "traces6.npy")
self.obs_label_list = {"n": 0, "r": 1, "rr": 2, "rrr": 3, "g": 4, "gb": 5, "bg": 6, "b": 7}
self.act_label_list = {"N": 0, "R": 1, "G": 2, "B": 3}
# models
self.tsm = {"filename": "c_backbone_tsm_1_bn16", "bottleneck": 16}
self.wrn = {"filename": "c_backbone_wrn_0_bn16", "bottleneck": 16}
self.i3d = {"filename": "c_backbone_i3d_1_bn16", "bottleneck": 16}
self.vgg = {"filename": "c_backbone_vgg_0_bn32", "bottleneck": 32}
elif app == "ikea":
self.file_directory = os.path.join(DATASET_DIR, "IKEA_fa")
label_path = os.path.join(*[self.file_directory, "frames", "train"])
self.obs_label_list = {k: v for v, k in enumerate(os.listdir(label_path))}
self.act_label_list = None # Activity Recognition Dataset
self.masking = False
# models
self.tsm = {"filename": "c_backbone_tsm_0", "bottleneck": 64}
self.wrn = {"filename": "c_backbone_wrn_0", "bottleneck": 64}
self.i3d = {"filename": "c_backbone_i3d_0", "bottleneck": 64}
self.vgg = {"filename": "c_backbone_vgg_0", "bottleneck": 64}
elif app == "crepe_action":
self.file_directory = os.path.join(DATASET_DIR, "CrepeAction")
label_path = os.path.join(*[self.file_directory, "frames", "train"])
self.obs_label_list = {k: v for v, k in enumerate(sorted(os.listdir(label_path)))}
self.act_label_list = None # Activity Recognition Dataset
self.masking = True
# models
self.tsm = {"filename": "c_backbone_tsm_1", "bottleneck": 64}
self.wrn = {"filename": "c_backbone_wrn_2", "bottleneck": 64}
self.i3d = {"filename": "c_backbone_i3d_1", "bottleneck": 64}
self.vgg = {"filename": "c_backbone_vgg_0", "bottleneck": 64}
elif app == "crepe_recipe":
self.file_directory = os.path.join(DATASET_DIR, "CrepeRecipe")
label_path = os.path.join(*[self.file_directory, "frames", "train"])
self.obs_label_list = {k: v for v, k in enumerate(sorted(os.listdir(label_path)))}
self.act_label_list = None # Activity Recognition Dataset
self.masking = True
# models
self.tsm = {"filename": "c_backbone_tsm_1", "bottleneck": 64}
self.wrn = {"filename": "c_backbone_wrn_2", "bottleneck": 64}
self.i3d = {"filename": "c_backbone_i3d_1", "bottleneck": 64}
self.vgg = {"filename": "c_backbone_vgg_0", "bottleneck": 64}
self.num_labels = len(self.obs_label_list)
def set_application(self, app):
self.application = self.ApplicationDef(app)
self.base_model_dir += '_' + app
self.model_save_dir += '_' + app
class ModelDef:
def __init__(self, model_id, bottleneck_size, original_size, iad_frames, spatial_size,
backbone_class, pretrain_model_name=None, save_id=None, end_point=-1):
self.end_point = end_point
self.model_id = model_id
self.bottleneck_size = bottleneck_size
self.original_size = original_size[self.end_point]
self.iad_frames = iad_frames[self.end_point]
self.spatial_size = spatial_size
self.backbone_class = backbone_class
self.pretrain_model_name = pretrain_model_name
self.save_id = save_id
def set_model_params(self, model_id, end_point=-1):
from enums import Backbone
assert self.application != "unassigned", "ERROR: call the set_application function before the set_model_params function"
if model_id == Backbone.TSM:
from model.backbone_model.backbone_tsm import BackboneTSM as backbone_class
pretrain_model_name = os.path.join(MODEL_SRC_DIR,
"TSM_somethingv2_RGB_resnet101_shift8_blockres_avg_segment8_e45.pth")
save_id = self.application.tsm["filename"]
bottleneck = self.application.tsm["bottleneck"]
self.model = self.ModelDef("tsm", bottleneck, [2048], [64], 7, backbone_class,
pretrain_model_name=pretrain_model_name,
save_id=save_id)
elif model_id == Backbone.WRN:
from model.backbone_model.backbone_wrn import BackboneWideResNet as backbone_class
save_id = self.application.wrn["filename"]
bottleneck = self.application.wrn["bottleneck"]
self.model = self.ModelDef("wrn", bottleneck, [2048], [64], 7, backbone_class,
save_id=save_id)
elif model_id == Backbone.VGG:
from model.backbone_model.backbone_vgg import BackboneVGG as backbone_class
save_id = self.application.vgg["filename"]
bottleneck = self.application.vgg["bottleneck"]
self.model = self.ModelDef("vgg", bottleneck, [512], [64], 7, backbone_class,
save_id=save_id)
elif model_id == Backbone.I3D:
original_size = [64, 192, 256, 832, 1024, 128]#1024
iad_frames = [32, 32, 32, 16, 8, 8]
from model.backbone_model.backbone_i3d import BackboneI3D as backbone_class
pretrain_model_name = os.path.join(MODEL_SRC_DIR, "rgb_imagenet.pt")
save_id = self.application.i3d["filename"]
bottleneck = self.application.i3d["bottleneck"]
self.model = self.ModelDef("i3d", bottleneck, original_size, iad_frames, 7, backbone_class,
pretrain_model_name=pretrain_model_name,
save_id=save_id,
end_point=end_point)
return Params()
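# Typical call sequence (sketch, kept as comments; `Backbone` is this repo's enum of
# supported feature extractors and the application must be one of `application_list`):
#   from enums import Backbone
#   lfd_params = default_model_params()
#   lfd_params.set_application("block_construction")  # must be called before set_model_params
#   lfd_params.set_model_params(Backbone.TSM)
#   print(lfd_params.model.save_id, lfd_params.model.bottleneck_size)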
| 44.601896 | 132 | 0.551376 | [
"MIT"
] | AssistiveRoboticsUNH/temporal_feature_lfd | parameter_parser.py | 9,411 | Python |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class PolicyRuleSmbClientGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[PolicyRuleSmbClient]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.PolicyRuleSmbClient]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[PolicyRuleSmbClient]): Returns a list of all items after filtering. The values are displayed for each name where meaningful.
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyRuleSmbClientGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PolicyRuleSmbClientGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PolicyRuleSmbClientGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 38.238462 | 524 | 0.616375 | [
"BSD-2-Clause"
] | Flav-STOR-WL/py-pure-client | pypureclient/flasharray/FA_2_5/models/policy_rule_smb_client_get_response.py | 4,971 | Python |
from util.dc_verilog_parser import *
def main():
folder = "../dc/sub/adder8/"
# folder = "../dc/boom/implementation/"
total_nodes = 0
total_edges = 0
ntype = set()
for v in os.listdir(folder):
if v.startswith("hier"):
continue
vf = os.path.join(folder, v)
print("parsing {}...".format(vf))
# parser = DcParser("BoomCore", ["alu_DP_OP", "add_x"])
parser = DcParser("test1", [ "add_x"], "hadd_s")
nodes, edges = parser.parse(vf, label_region=True)
print("nodes {}, edges {}".format(len(nodes), len(edges)))
# nodes, edges = parser.clip(nodes, edges)
nodes, edges = parser.remove_div(nodes, edges)
adder_out_type = collections.defaultdict(int)
node_type = collections.defaultdict(int)
for n in nodes:
if n[1]["is_output"]:
adder_out_type[n[1]["type"]] += 1
node_type[n[1]["type"]] += 1
print(node_type)
print(adder_out_type)
print("clipped: nodes {}, edges {}".format(len(nodes), len(edges)))
for n in nodes:
ntype.add(n[1]["type"])
total_nodes += len(nodes)
total_edges += len(edges)
# return
print(ntype)
print(total_nodes, total_edges)
if __name__ == "__main__":
# dc_parser("../dc/simple_alu/implementation/alu_d0.20_r2_bounded_fanout_adder.v")
main()
# cProfile.run("main()") | 34.309524 | 86 | 0.574601 | [
"Apache-2.0"
] | ZeayW/graph-contrastive-learning | util1/find_central.py | 1,441 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tablo', '0003_auto_20160106_1509'),
]
operations = [
migrations.CreateModel(
name='FeatureServiceLayerRelations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
('related_index', models.PositiveIntegerField(default=0)),
('related_title', models.CharField(max_length=255)),
('source_column', models.CharField(max_length=255)),
('target_column', models.CharField(max_length=255)),
('layer', models.ForeignKey(to='tablo.FeatureServiceLayer')),
],
),
]
| 31.961538 | 95 | 0.602888 | [
"BSD-3-Clause"
] | nrdsdata/tablo | tablo/migrations/0004_featureservicelayerrelations.py | 831 | Python |
# Library of fault injection functions called at runtime for common operations in TensorFlow
# NOTE: These are called by the corresponding functions inserted in the TensorFlow graph at RUNTIME
import tensorflow as tf
import numpy as np
import logging
from fiConfig import *
from fiLog import *
from threading import current_thread
# FIXME: Add this to the list of dependencies for this module
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils.extmath import softmax
# global variable to determine fine grained levels of logging
# WARNING: Setting these to True may generate a lot of log data
logReturn = False # log return values of functions
logArgs = False # log arguments of operators
logInjection = True # log fault injection and checking
# This is the initialization function for the config file
# and is called from TensorFI.py's constructor
# NOTE: This has to be in this module or else fiConf won't be accessible
def initFIConfig(fiParams):
"Initialize the global variable fiConf with the params"
global fiConf
global count
# instance of the current op (e.g., 3 ADD op means 3 instances of ADD op)
global visitedOp
# random instance of the op to be injected
global randInstanceMap
# order of the current op (e.g., the sequence of the current op in all of the op in the dataflow graph)
global totalVistedOp
# which op to be injected in the whole run
global injectedOp
fiConf = FIConfig(fiParams)
logging.debug("Initialized config file : " + str(fiConf))
# Setup the random seed for the fault injector if one is specified
if fiConf.faultSeed: np.random.seed( fiConf.faultSeed )
# Initialize the count of the selected operations to 0 (for skipCount)
count = 0
visitedOp = {}
randInstanceMap = {}
totalVistedOp = 0
injectedOp = 0
return fiConf
# End of fiConfing
def getFIConfig():
"Return the fiConfig that was initialized"
global fiConf
return fiConf
# End of getFIConfig
# These functions have to do with the faultLog and are called from TensorFI.py
faultLogs = { } # Global map of Threads to their fault logs
def initFILog(name):
"Initialize the fault injection log - optionally specify a thread number"
global faultLogs
global logName
logName = name
faultLog = FILog(logName)
# Add the fault log to the log for the current thread
current = current_thread()
faultLogs[ current ] = faultLog
# logging.debug("Initialized faultLog for thread " + str(current) + " as " + logName)
# End of initFILog
def getCurrentFaultLog():
"Return the fault log for the current thread (if it exists), add it otherwise"
# Precondition: faultLogs != None
global faultLogs
global logName
current = current_thread()
faultLog = None
# If we cannot find the faultLog for the current thread, add it to the faultLogs
# FIXME: This doesn't work because TensorFlow uses its own threading infrastructure
# and ThreadIDs are not the same during log creation time and log access time
# So we always end up writing to the first entry of the faultLogs dictionary
if not faultLogs.has_key(current):
# logging.debug("Cannot find fault log for " + str(current) )
faultLog = FILog(logName + "-" + current.name)
faultLogs[ current ] = faultLog
# faultLog = faultLogs.values()[0]
else:
# Otherwise, return the fault log for the current thread
faultLog = faultLogs[current]
# logging.debug("Returning fault log " + str(faultLog) + " for thread " + str(current) )
return faultLog
# End of getCurrentFaultLog
def logRun(runCount):
"Update the run count in the log file"
global count
# Reset the count on a new run
count = 0
faultLog = getCurrentFaultLog() # Get the fault log for the current thread
# Log the runCount and start a new section of the logFile
faultLog.updateRunCount( runCount )
faultLog.dashedLine()
# End of logRun
# These are the basic fault injection functions that're called at runtime
# NOTE: We need to first call initFIConfig before these are called
def perturb(val):
"Inject a single fault in res - fault type depends on config param"
# Precoditions: injectScalar != None && injectTensor != None
faultLog = getCurrentFaultLog() # Get the fault log for the current thread
isScalar = np.isscalar(val)
vType = val.dtype
if logInjection:
logging.debug("\tPerturbing " + str(val) + " of type: " + str(vType) + " isScalar: " + str(isScalar) )
# Check if the object is a scalar or a tensor, and call the corresponding injection function
if isScalar:
res = fiConf.injectScalar( vType, val.copy())
else:
res = fiConf.injectTensor( vType, val.copy())
# Enter an entry in the fault log that we injected a fault here
faultLog.updateOriginal( val )
faultLog.updateInjected( res )
#if logInjection: logging.debug("\t TESTING ... res = " + str(res))
return res
# End of perturb
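# Note (added commentary, not in the original source): perturb() only decides whether
# the value is a scalar or a tensor; the actual fault model (what gets corrupted and how)
# is implemented by fiConf.injectScalar / fiConf.injectTensor in fiConfig.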
def condPerturb(op, res):
"Calls the perturb function if and only if the op Operation is included for injection"
# Pre-condition: injectMap != None && skipCount != None
global count # Keeps track of how many times the selected operation(s) are executed
global visitedOp
faultLog = getCurrentFaultLog() # Get the fault log for the current thread
if logInjection:
logging.debug("\tChecking if operation " + str(op) + " is chosen for injection")
# Check if the operation is chosen for injection and if so, inject a fault
if fiConf.isSelected(op):
count = count + 1 # If it's selected, then update the execution count
if logInjection: logging.debug("\t TESTING ... Operation " + str(op) + " is chosen for injection")
# Enter the op and count in the faultLog - as we won't have access to it later
# NOTE: This is not actually written to the logFIle till faultLog.commit is called
# so we won't write to the log if a fault is not injected into it
faultLog.updateOp( op )
faultLog.updateCount( count )
# If the operation exceeds the number of times it is to be skipped (default=0)
if (count > fiConf.skipCount):
"(1) inject faults based on the error rate"
if(fiConf.injectMode == "errorRate" ):
# Retrieve the probability of perturbing this instruction
# and generate a random number in the interval [0, 1]
# and perturb it only if the random no. <= the probability
prob = fiConf.getProbability(op)
#if logInjection: logging.debug("\t TESTING ... PROB = " + str(prob))
rn = np.random.random() # random.random returns a number in [0, 1]
#if logInjection: logging.debug("\t TESTING ... np = " + str(rn))
if (rn <= prob):
res = perturb(res) # Perturb is called to inject the fault
faultLog.commit() # Write the log entry to the fault log
"(2) inject faults based on the dynamic instance of op, i.e., inject one instance for each op"
if(fiConf.injectMode == "dynamicInstance"):
# Retrieve the total instances of this instruction
# each operation will be injected once only
# and generate a random number to select a random instance of the operation
# and perturb it only if the current instance has been selected
instance = fiConf.getInstance(op)
if logInjection: logging.debug("\t TESTING ... instance = " + instance)
# You can manually specify the instance here rather than using the random instances
# So that you can inject fault into a target operator
# E.g., randInstanceMap[op] = instance of op to be injected
if (not randInstanceMap.has_key(op)):
# random instance of the selected op to be injected
randInstanceMap[op] = np.random.randint(low=1, high=instance+1)
# first instance of the op
if(not visitedOp.has_key(op)): visitedOp[op] = 1
# not the first instance of op
else: visitedOp[op] += 1
# determine if the current instance is selected for injection
if(visitedOp[op] == randInstanceMap[op]):
res = perturb(res)
faultLog.updateInjectedInstance(randInstanceMap[op], instance)
faultLog.commit()
# current run has finished, re-initialize the visit table for the next run
# used when you need to do injection on the same op in the next run
if(visitedOp[op] == instance):
visitedOp[op] = 0
"(3) inject one fault per run"
if(fiConf.injectMode == "oneFaultPerRun"):
# refer to the global variables that track the order of the current op
global totalVistedOp
global injectedOp
# get the total number of ops
totalInstance = fiConf.totalInstance
totalVistedOp += 1
# select one random op to be injected in the whole run
if(injectedOp == 0):
injectedOp = np.random.randint(low=1, high=totalInstance+1)
# inject fault at the output of the operation
if(totalVistedOp == injectedOp):
res = perturb(res)
faultLog.updateInjectedInstance(injectedOp, totalInstance)
faultLog.commit()
# current run has finished, re-initialize the visit table for the next run (optional)
if(totalVistedOp == totalInstance):
totalVistedOp = 0
injectedOp = 0
# Done with if count
# Done with if isSelected
#if logInjection: logging.debug("\t TESTING ... Operation " + str(op) + " is NOT chosen for injection")
return res
# End of condPerturb
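# Summary of the three injection modes handled above (derived from the code):
# "errorRate" - each executed instance of a selected op is perturbed
# with probability fiConf.getProbability(op)
# "dynamicInstance" - exactly one randomly chosen instance of each selected op
# is perturbed per run
# "oneFaultPerRun" - exactly one randomly chosen op instance in the whole run
# is perturbed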
# This is a specialized function to cast into values of different types
def castType(type):
"Returns the appropriate injection function based on the type"
# Create specialized functions for each type
# FIXME: Only 4 types are supported now. Support more types later.
def castFloat32(value):
logging.debug("Casting to " + str(type))
return np.float32(value)
def castInt32(value):
logging.debug("Casting to " + str(type))
return np.int32(value)
def castInt64(value):
logging.debug("Casting to " + str(type))
return np.int64(value)
def castFloat64(value):
logging.debug("Casting to " + str(type))
return np.float64(value)
# Check the type parameter and return the appropriate function
if (type==np.float32):
return castFloat32
elif (type==np.int32):
return castInt32
elif (type==np.int64):
return castInt64
elif (type==np.float64):
return castFloat64
else:
raise TypeError("Unknown type " + type)
return None
# End of castType
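# Illustrative example (not part of the original source): castType returns a
# conversion function for the requested NumPy dtype, e.g.
# castType(np.float32)(3.7) -> np.float32(3.7)
# castType(np.int64)("5") -> np.int64(5)
# and raises TypeError for any dtype outside the four supported ones.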
# Debugging function to log the values of the arguments
# if and only if logArgs is set to True
def getArgs(*args):
"Return a string of the args if logArgs is True; Empty String otherwise"
res = " "
if logArgs:
res +="( "
for arg in args:
res = res + " , " + str(arg)
res += " )"
return res
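# Illustrative example (assumption, for documentation only): with logArgs = True,
# getArgs(1, 2) returns roughly " (  , 1 , 2 )"; with logArgs = False it returns
# a single space, so operator logs stay compact by default.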
# Start the implementation of the injectFault functions for each op type
# This is a special case for the Cast function which needs to remember the type
# We use closures to remember the type and cast it appropriately at "runtime"
def createInjectFaultCast(type):
"Returns a Function to call injectFault on cast nodes"
castInto = castType(type) # get the appropriate casting function for the type
def injectFaultCast(a, b = None):
"Inject a fault into a Cast instruction"
logging.debug("Calling Operator Cast " + getArgs(a, b))
# If we're given 2 parameters, treat it as the default case
if b is not None:
# np.cast is NumPy's legacy casting table and is indexed by the target dtype
res = np.cast[b](a)
else:
# Call the function for this type with 'a'
res = castInto(a)
res = condPerturb(Ops.CAST, res)
if logReturn: logging.debug("\tReturning " + str(res) )
return res
# Return the injectFaultCast function
return injectFaultCast
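# Illustrative usage (assumption, not from the original source):
# injectCast = createInjectFaultCast(np.float32)
# injectCast(3) # -> np.float32(3.0), possibly perturbed by condPerturb(Ops.CAST, ...)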
def injectFaultNoop():
"Inject a fault in the Noop operaton - does nothing"
logging.debug("Calling Operator Noop")
# No need to call Perturb as there's nothing to return
return
def injectFaultAssign(a, b):
"Inject a fault in the assignement operation"
logging.debug("Calling Operator Assigment " + getArgs(a, b))
res = b # FIXME: Check semantics of assignment operator
res = condPerturb(Ops.ASSIGN, res)
if logReturn: logging.debug("\tReturning from Assignment " + str(res) )
return res
def injectFaultIdentity(a):
"Inject a fault in the identitiy operation"
logging.debug("Calling Operator Identity" + getArgs(a))
res = a
res = condPerturb(Ops.IDENTITY, res)
if logReturn: logging.debug("\tReturning from Identity " + str(res) )
return res
def injectFaultAdd(a, b):
"Function to call injectFault on Add nodes"
logging.debug("Calling Operator Add " + getArgs(a, b))
#if logInjection: logging.debug("\t TESTING ... Calling Operator Add " + str(a) + "," + str(b)+")")
resOp = tf.add(a, b)
with tf.Session() as sess:
res = resOp.eval()
res = condPerturb(Ops.ADD, res)
#if logInjection: logging.debug("\t TESTING ... Returning from Add = " + str(res))
if logReturn: logging.debug("\tReturning from Add " + str(res) )
return res
def injectFaultSub(a, b):
"Function to call injectFault on Sub nodes"
logging.debug("Calling Operator Sub " + getArgs(a, b))
res = a - b
res = condPerturb(Ops.SUB, res)
if logReturn: logging.debug("\tReturning from Sub " + str(res) )
return res
def injectFaultMul(a, b):
"Function to call injectFault on Mul nodes"
logging.debug("Calling Operator Mul " + getArgs(a, b))
res = a * b
res = condPerturb(Ops.MUL,res)
if logReturn: logging.debug("\tReturning from Mul " + str(res) )
return res
def injectFaultSquare(a):
"Function to call injectFault on Square nodes"
logging.debug("Calling Operator Square " + getArgs(a))
res = a * a
res = condPerturb(Ops.SQUARE,res)
if logReturn: logging.debug("\tReturning from Square " + str(res) )
return res
def injectFaultShape(a):
"Function to call injectFault on Shape nodes"
logging.debug("Calling Operator Shape " + getArgs(a))
# If it's a tensor, call shape on it directly
# Otherwise, use numpy to get its shape
if isinstance(a, tf.Tensor):
res = a.shape # shape is a property on tf.Tensor, not a method
else:
# res = tf.convert_to_tensor( np.shape(a) , dtype = np.int32 )
res = np.int32( np.shape(a) )
# res should be either a scalar or tensor here
res = condPerturb(Ops.SHAPE,res)
if logReturn: logging.debug("\tReturning from Shape " + str(res) )
return res
def injectFaultSize(a):
"Function to call injectFault on Size nodes"
logging.debug("Calling Operator Size " + getArgs(a))
res = np.int32(a.size) # size is an attribute; wrap it like Shape/Rank so condPerturb gets a NumPy value
res = condPerturb(Ops.SIZE, res)
if logReturn: logging.debug("\tReturning from Size " + str(res) )
return res
def injectFaultFill(a, b):
"Function to call injectFault on Shape nodes"
logging.debug("Calling Operator Fill " + getArgs(a, b))
res = np.full(a, b)
res = condPerturb(Ops.FILL, res)
if logReturn: logging.debug("\tReturning from Fill" + str(res) )
return res
def injectFaultFloorMod(a, b):
"Function to call injectFault on FloorMod nodes"
logging.debug("Calling Operator FloorMod " + getArgs(a, b))
# FIXME: Need to check if mod is the equivalent of floorMod in NumPy
res = np.mod(a, b)
res = condPerturb(Ops.FLOORMOD, res)
if logReturn: logging.debug("\tReturning from FloorMod " + str(res) )
return res
def injectFaultRange(start, stop, step, dtype = None):
"Function to call injectFault on Range nodes"
logging.debug("Calling Operator Range " + getArgs(start, stop, step))
res = np.int32(np.arange(start, stop, step, dtype))
res = condPerturb(Ops.RANGE, res)
if logReturn: logging.debug("\tReturning from Range " + str(res) )
return res
def injectFaultRank(a):
"Function to call injectFault on Rank nodes"
logging.debug("Calling Operator Rank " + getArgs(a))
res = np.int32( np.ndim(a) )
res = condPerturb(Ops.RANK, res)
if logReturn: logging.debug("\tReturning from Rank " + str(res) )
return res
def injectFaultSum(a, b):
"Function to call injectFault on Sum nodes"
logging.debug("Calling Operator Sum " + getArgs(a, b))
# Check if b is an integer scalar array
# and if so, pass it to np.sum
# Otherwise, ignore it (FIXME: is this the correct behavior ?)
if np.isscalar(b):
res = np.sum(a, b)
else:
res = np.sum(a)
res = condPerturb(Ops.SUM, res)
if logReturn: logging.debug("\tReturning from Sum " + str(res) )
return res
def injectFaultReshape(a, b):
"Function to call injectFault on Reshape"
logging.debug("Calling Operator Reshape " + getArgs(a, b))
res = np.reshape(a, b)
res = condPerturb(Ops.RESHAPE, res)
if logReturn: logging.debug("\tReturning from Reshape " + str(res) )
return res
def injectFaultOneHot(a, b, c, d):
"Function to call injectFault on OneHot"
logging.debug("Calling Operator One Hot " + getArgs(a, b, c, d))
# TF adds two default arguments, so we need to pass them as well
resOp = tf.one_hot(a, b, c, d)
with tf.Session() as sess:
res = resOp.eval()
res = condPerturb(Ops.ONE_HOT, res)
if logReturn: logging.debug("\tReturning from One Hot " + str(res) )
return res
def injectFaultMatMul(a, b):
"Function to call injectFault on matrix multiplication"
logging.debug("Calling Operator MatMul " + getArgs(a, b))
matmul = tf.matmul(a,b)
with tf.Session() as sess:
res = matmul.eval()
# res = np.matmul(a, b)
res = condPerturb(Ops.MATMUL, res)
if logReturn: logging.debug("\tReturning from MatMul " + str(res) )
return res
def injectFaultArgMax(a, b):
"Function to call injectFault on ArgMax"
logging.debug("Calling Operator ArgMax " + getArgs(a, b))
resOp = tf.argmax(a, b)
with tf.Session() as sess:
res = resOp.eval()
res = condPerturb(Ops.ARGMAX, res)
if logReturn: logging.debug("\tReturning from ArgMax " + str(res) )
return res
def injectFaultArgMin(a, b):
"Function to call injectFault on ArgMin"
logging.debug("Calling Operator ArgMin " + getArgs(a, b))
res = np.argmin(a, b)
res = condPerturb(Ops.ARGMIN, res)
if logReturn: logging.debug("\tReturning from ArgMin " + str(res) )
return res
def injectFaultEqual(a, b):
"Function to call injectFault on equal"
logging.debug("Calling Operator Equal " + getArgs(a, b))
res = np.equal(a, b)
res = condPerturb(Ops.EQUAL, res)
if logReturn: logging.debug("\tReturning from Equal " + str(res) )
return res
def injectFaultNotEqual(a, b):
"Function to call injectFault on not equal"
logging.debug("Calling Operator Not Equal " + getArgs(a, b))
res = np.not_equal(a, b)
res = condPerturb(Ops.NOT_EQUAL, res)
if logReturn: logging.debug("\tReturning from Not Equal " + str(res) )
return res
def injectFaultLessEqual(a, b):
"Function to call injectFault on less equal"
logging.debug("Calling Operator Less Equal " + getArgs(a, b))
res = np.less_equal(a, b)
res = condPerturb(Ops.LESS_EQUAL, res)
if logReturn: logging.debug("\tReturning from Less Equal " + str(res) )
return res
def injectFaultGreaterEqual(a, b):
"Function to call injectFault on greater equal"
logging.debug("Calling Operator Greater Equal " + getArgs(a, b))
res = np.greater_equal(a, b)
res = condPerturb(Ops.GREATER_EQUAL, res)
if logReturn: logging.debug("\tReturning from Greater Equal " + str(res) )
return res
def injectFaultMean(a, b):
"Function to call injectFault on mean"
logging.debug("Calling Operator mean " + getArgs(a, b))
# FIXME: This only works if we call np.mean on b[0]. Need to figure out why.
res = np.mean(a, b[0])
res = condPerturb(Ops.MEAN, res)
if logReturn: logging.debug("\tReturning from Mean " + str(res) )
return res
def injectFaultCountNonZero(a):
"Function to call injectFault on countNonZero"
logging.debug("Calling Operator CountNonZero " + getArgs(a))
res = np.count_nonzero(a)
res = condPerturb(Ops.COUNT_NONZERO, res)
if logReturn: logging.debug("\tReturning on CountNonZero " + str(res) )
return res
def injectFaultConv2D(a, b, strides, padding):
"Function to call injectFault on Conv2D"
logging.debug("Calling Operator conv2D " + getArgs(a, b))
conv = tf.nn.conv2d(a , b, strides=strides.tolist(), padding=padding)
with tf.Session() as sess:
res = conv.eval()
res = condPerturb(Ops.CONV2D, res)
if logReturn: logging.debug("\tReturning from Conv2D " + str(res) )
return res
def injectFaultRelu(a):
"Function to call injectFault on RelU"
logging.debug("Calling Operator RelU " + getArgs(a))
relu = tf.nn.relu(a)
with tf.Session() as sess:
res = relu.eval()
res = condPerturb(Ops.RELU, res)
if logReturn: logging.debug("\tReturning from RelU " + str(res) )
return res
def injectFaultMaxPool(a, ksize, strides, padding):
"Function to call injectFault on MaxPool"
maxpool = tf.nn.max_pool(a, ksize=ksize.tolist(), strides=strides.tolist(), padding=padding)
with tf.Session() as sess:
res = maxpool.eval()
res = condPerturb(Ops.MAXPOOL, res)
if logReturn: logging.debug("\tReturningfrom MaxPool " + str(res) )
return res
def injectFaultUnpack(a):
"Function to call injectFault on unpack"
logging.debug("Calling Operator Unpack " + getArgs(a))
# This operation is deprecated in TF 1.0 and above
res = np.array_split(a, a.shape[1])
# FIXME: Can't inject faults into unpack as it's not a tensor or scalar
# res = condPerturb(Ops.UNPACK, res)
if logReturn: logging.debug("\tReturning from Unpack " + str(res) )
return res
def injectFaultUnstack(a, b = None, c = 0):
"Function to call injectFault on unstack"
# This is the same as Unpack in newer versions of TF
logging.debug("Calling Operator Unstack " + getArgs(a, b, c))
resOp = tf.unstack(a, b, c)
with tf.Session() as sess:
res = resOp.eval()
if logReturn: logging.debug("\tReturning from Unstack " + str(res) )
return res
def injectFaultStridedSlice(a, b, c, d):
"Function to call injectFault on StridedSlice"
logging.debug("Calling Operator StridedSlice " + getArgs(a, b, c, d))
# FIXME: Implement this functionality
resOp = tf.strided_slice(a, b, c, d)
with tf.Session() as sess:
res = resOp.eval()
res = condPerturb(Ops.STRIDEDSLICE, res)
if logReturn: logging.debug("\tReturning from StridedSlice " + str(res) )
return res
def injectFaultExpandDims(a, b):
"Function to call injectFault on ExpandDims"
logging.debug("Calling Operator ExpandDims " + getArgs(a, b))
res = np.expand_dims(a, b)
res = condPerturb(Ops.EXPANDDIMS, res)
if logReturn: logging.debug("\tReturning from ExpandDims " + str(res) )
return res
def injectFaultPack(a, b):
"Function to call injectFault on Pack"
# FIXME: Implement this functionality
logging.debug("Calling Operator Pack" + getArgs(a, b))
# res = np.stack(a, b)
# FIXME: This throws an exception, so we dummied it out
res = a
res = condPerturb(Ops.PACK, res)
if logReturn: logging.debug("\tReturning " + str(res) )
return res
def injectFaultConcatV2(a, b, c):
"Function to call injectFault on ConcatV2"
logging.debug("Calling Operator ConcatV2" + getArgs(a, b, c))
res = np.concatenate((a, b), c)
res = condPerturb(Ops.PACK, res)
if logReturn: logging.debug("\tReturning from Concat " + str(res) )
return res
def injectFaultSoftmax(a):
"Function to call injectFault on Softmax"
logging.debug("Calling Operator Softmax " + getArgs(a))
resOp = tf.nn.softmax(a)
with tf.Session() as sess:
res = resOp.eval()
res = condPerturb(Ops.SOFTMAX, res)
if logReturn: logging.debug("\tReturning from Softmax " + str(res) )
return res
def injectFaultMaximum(a, b):
"Function to call injectFault on Maximum"
logging.debug("Calling Operator Maximum " + getArgs(a, b))
res = np.maximum(a, b)
res = condPerturb(Ops.MAXIMUM, res)
if logReturn: logging.debug("\tReturning from Maximum " + str(res) )
return res
def injectFaultMinimum(a, b):
"Function to call injectFault on Maximum"
logging.debug("Calling Operator Minimum " + getArgs(a, b))
res = np.minimum(a, b)
res = condPerturb(Ops.MINIMUM, res)
if logReturn: logging.debug("\tReturning from Minimum " + str(res) )
return res
def injectFaultSwitch(a, b):
"Function to call injectFault on Switch"
logging.debug("Calling Operator Switch " + getArgs(a, b))
# FIXME: Actually implement the Switch operation
# Only there's no TensorFlow documentation for it !!!
# res = np.select(a, b)
res = a, a
# res = condPerturb(Ops.SWITCH, res)
if logReturn: logging.debug("\tReturning from Switch " + str(res) )
return res
def injectFaultGreater(a, b):
"Function to call injectFault on Greater"
logging.debug("Calling Operator Greater " + getArgs(a, b))
res = np.greater(a, b)
res = condPerturb(Ops.GREATER, res)
if logReturn: logging.debug("\tReturning from Greater " + str(res) )
return res
def injectFaultNeg(a):
"Function to call injectFault on negative"
logging.debug("Calling Operator Neg " + getArgs(a))
res = np.negative(a)
res = condPerturb(Ops.NEGATIVE, res)
if logReturn: logging.debug("\tReturning from Neg " + str(res) )
return res
def injectFaultPow(a, b):
"Function to call injectFault on pow"
logging.debug("Calling Operator Pow " + getArgs(a, b))
res = np.power(a, b)
res = condPerturb(Ops.POWER, res)
if logReturn: logging.debug("\tReturning from Pow " + str(res) )
return res
def injectFaultAbs(a):
"Function to call injectFault on absolute"
logging.debug("Calling Operator Abs " + getArgs(a))
res = np.absolute(a)
res = condPerturb(Ops.ABSOLUTE, res)
if logReturn: logging.debug("\tReturning from Abs " + str(res) )
return res
def injectFaultRsqrt(a):
"Function to call injectFault on Rsqrt"
logging.debug("Calling Operator Rsqrt " + getArgs(a))
res = np.reciprocal( np.sqrt(a) )
res = condPerturb(Ops.RSQRT, res)
if logReturn: logging.debug("\tReturning from Rsqrt " + str(res) )
return res
def injectFaultNN(a, b, c):
"Function to call injectFault on Nearest Neighbors"
# FIXME: According to the TF docs, this operation doesn't exist !
# Not sure what the third parameter is supposed to be.
logging.debug("Calling Operator Nearest Neighbors " + getArgs(a, b, c))
res = KNeighborsClassifier(a)
if logReturn: logging.debug("\tReturning from Nearest Neighbors " + str(res) )
return res
def injectFaultLog(a):
"Function to call injectFault on Log"
logging.debug("Calling Operator Log " + getArgs(a))
res = np.log(a)
res = condPerturb(Ops.LOG, res)
if logReturn: logging.debug("\tReturning from Log " + str(res) )
return res
def injectFaultRealDiv(a, b):
"Function to call injectFault on RealDiv"
# FIXME: Implement this functionality
logging.debug("Calling Operator Log " + getArgs(a, b))
res = np.divide( a, b )
res = condPerturb(Ops.REALDIV, res)
if logReturn: logging.debug("\tReturning from RealDiv " + str(res) )
return res
def injectFaultBiasAdd(a, b):
"Function to call injectFault on BiasAdd"
logging.debug("Calling Operator BiasAdd " + getArgs(a, b))
res = a + b
res = condPerturb(Ops.BIASADD, res)
if logReturn: logging.debug("\tReturning from BiasAdd " + str(res) )
return res
def injectFaultSigmoid(a):
"Function to call injectFault on Sigmoid"
logging.debug("Calling Operator Sigmoid " + getArgs(a))
res = np.reciprocal( 1 + np.exp(-a) )
res = condPerturb(Ops.SIGMOID, res)
if logReturn: logging.debug("\tReturning from Sigmoid " + str(res) )
return res
def injectFaultTanh(a):
"Function to call injectFault on Tanh"
logging.debug("Calling Operator Tanh " + getArgs(a))
res = np.tanh( a )
res = condPerturb(Ops.TANH, res)
if logReturn: logging.debug("\tReturning from Tanh " + str(res) )
return res
def injectFaultLRN(a, bias, alpha, beta):
"Function to call injectFault on LRN"
logging.debug("Calling Operator LRN" + getArgs(a, bias, alpha, beta))
# FIXME: How to derive the depth_radius from LRN
# Currently we manually use the value from the main program.
# depth_radius = 2
resOp = tf.nn.lrn( a , 2, bias=bias, alpha=alpha, beta=beta)
with tf.Session() as sess:
res = resOp.eval()
res = condPerturb(Ops.LRN, res)
if logReturn: logging.debug("\tReturning from LRN " + str(res) )
return res
def injectFaultELU(a):
"Function to call injectFault on ELU"
logging.debug("Calling Operator ELU " + getArgs(a))
relu = tf.nn.elu(a)
with tf.Session() as sess:
res = relu.eval()
res = condPerturb(Ops.ELU, res)
if logReturn: logging.debug("\tReturning from ELU " + str(res) )
return res
def injectFaultRandomUniform(a):
"Function to call injectFault on Random Uniform"
logging.debug("Calling Operator RandomUniform" + getArgs(a))
ru = tf.random_uniform(a)
with tf.Session() as sess:
res = ru.eval()
res = condPerturb(Ops.RANDOM_UNIFORM, res)
if logReturn: logging.debug("\tReturning from Random Uniform " + str(res) )
return res
def injectFaultFloor(a):
"Function to call injectFault on Floor"
logging.debug("Calling Operator injectFaultFloor" + getArgs(a))
floor = tf.math.floor(a)
with tf.Session() as sess:
res = floor.eval()
res = condPerturb(Ops.FLOOR, res)
if logReturn: logging.debug("\tReturning from Floor operation " + str(res) )
return res
# End of implemented operators
##### None of the functions below have been implemented yet as they're not used #####
#### If you implement any of them, please move them above the line ####
##### Otherwise, they will all raise NotImplementedError(OpName) ####
def injectFaultDynamicStitch(inputs):
"Function to call injectFault on Dynamic stitch"
# FIXME: Implement this functionality
logging.debug("Calling Operator Dynamic stitch ")
raise NotImplementedError("DynamicStitch")
def injectFaultFloorDiv(inputs):
"Function to call injectFault on FloorDiv"
# FIXME: Implement this functionality
logging.debug("Calling Operator FloorDiv ")
raise NotImplementedError("FloorDiv")
def injectFaultTile(inputs):
"Function to call injectFault on Tile"
# FIXME: Implement this functionality
logging.debug("Calling Operator Tile")
raise NotImplementedError("Tile")
def injectFaultConcatOffset(inputs):
"Function to call injectFault on ConcatOffset"
# FIXME: Implement this functionality
logging.debug("Calling Operator ConcatOffset")
raise NotImplementedError("ConcatOffset")
def injectFaultSplit(inputs):
"Function to call injectFault on Split"
# FIXME: Implement this functionality
logging.debug("Calling Operator Split")
raise NotImplementedError("Split")
def injectFaultSoftmaxCEWL(inputs):
"Function to call injectFault on Softmax CEWL"
# FIXME: Implement this functionality
logging.debug("Calling Operator SoftmaxCEWL")
raise NotImplementedError("SoftmaCEWL")
def injectFaultSlice(inputs):
"Function to call injectFault on Slice"
# FIXME: Implement this functionality
logging.debug("Calling Operator Slice")
raise NotImplementedError("Slice")
def injectFaultBroadcastGA(inputs):
"Function to call injectFault on Broadcast gradient args"
# FIXME: Implement this functionality
logging.debug("Calling Operator BroadcastGA")
raise NotImplementedError("BroadcastGA")
def injectFaultTruncatedNormal(a):
"Function to call injectFault on TruncatedNormal"
# FIXME: Implement this functionality
logging.debug("Calling Operator TruncatedNormal") # + str(a))
raise NotImplementedError("TruncatedNormal")
def injectFaultRandomUniformInt(a):
"Function to call injectFault on Random Uniform Int"
# FIXME: Implement this functionality
logging.debug("Calling Operator RandomUniformInt")
raise NotImplementedError("RandomUniformInt")
def injectFaultRandomStandardNormal(a):
"Function to call injectFault on Random Standard Normal"
# FIXME: Implement this functionality
logging.debug("Calling Operator RandomStandardNormal")
raise NotImplementedError("RandomStandardNormal")
def injectFaultRefSwitch(a):
"Function to call injectFault on RefSwitch"
# FIXME: Implement this functionality
logging.debug("Calling Operator RefSwitch")
raise NotImplementedError("RefSwitch")
def injectFaultProd(a):
"Function to call injectFault on Prod"
# FIXME: Implement this functionality
logging.debug("Calling Operator Prod")
raise NotImplementedError("Prod")
def injectFaultUnique(a):
"Function to call injectFault on Unique"
# FIXME: Implement this functionality
logging.debug("Calling Operator Unique")
raise NotImplementedError("Unique")
def injectFaultReciprocal(a):
"Function to call injectFault on Reciprocal"
# FIXME: Implement this functionality
logging.debug("Calling Operator Reciprocal")
raise NotImplementedError("Reciprocal")
def injectFaultScatterAdd(a):
"Function to call injectFault on ScatterAdd"
# FIXME: Implement this functionality
logging.debug("Calling Operator ScatterAdd")
raise NotImplementedError("ScatterAdd")
def injectFaultReluGrad(a):
"Function to call injectFault on ReluGrad"
# FIXME: Implement this functionality
logging.debug("Calling Operator ReluGrad")
raise NotImplementedError("ReluGrad")
def injectFaultMaxPoolGrad(a):
"Function to call injectFault on MaxPoolGrad"
# FIXME: Implement this functionality
logging.debug("Calling Operator MaxPoolGrad")
raise NotImplementedError("MaxPoolGrad")
def injectFaultTanhGrad(a):
"Function to call injectFault on TanhGrad"
# FIXME: Implement this functionality
logging.debug("Calling Operator TanhGrad")
raise NotImplementedError("TanhGrad")
def injectFaultSigmoidGrad(a):
"Function to call injectFault on SigmoidGrad"
# FIXME: Implement this functionality
logging.debug("Calling Operator SigmoidGrad")
raise NotImplementedError("SigmoidGrad")
def injectFaultBiasAddGrad(a):
"Function to call injectFault on BiasAddGrad"
# FIXME: Implement this functionality
logging.debug("Calling Operator BiasAddGrad")
raise NotImplementedError("BiasAddGrad")
def injectFaultShapeN(inputs):
"Function to call injectFault on ShapeN"
# FIXME: Implement this functionality
logging.debug("Calling Operator ShapeN")
raise NotImplementedError("ShapeN")
def injectFaultAddN(inputs):
"Function to call injectFault on AddN"
# FIXME: Implement this functionality
logging.debug("Calling Operator AddN")
raise NotImplementedError("AddN")
def injectFaultConv2DBackprop(inputs):
"Function to call injectFault on Conv2DBackprop"
# FIXME: Implement this functionality
logging.debug("Calling Operator Conv2DBackProp")
raise NotImplementedError("Conv2DBackProp")
def injectFaultApplyAdam(inputs):
"Function to call injectFault on ApplyAdam"
# FIXME: Implement this functionality
logging.debug("Calling Operator ApplyAdam")
raise NotImplementedError("ApplyAdam")
def injectFaultSelect(inputs):
"Function to call injectFault on Select"
# FIXME: Implement this functionality
logging.debug("Calling Operator Select")
raise NotImplementedError("Select")
def injectFaultMerge(inputs):
"Function to call injectFault on Merge"
# FIXME: Implement this functionality
logging.debug("Calling Operator Merge")
raise NotImplementedError("Merge")
def injectFaultTranspose(inputs):
"Function to call injectFault on Transpose"
# FIXME: Implement this functionality
logging.debug("Calling Operator Transpose")
raise NotImplementedError("Transpose")
def injectFaultGather(inputs):
"Function to call injectFault on Gather"
# FIXME: Implement this functionality
logging.debug("Calling Operator Gather")
raise NotImplementedError("Gather")
def injectFaultUnsortedSegmentSum(inputs):
"Function to call injectFault on UnsortedSegmentSum"
# FIXME: Implement this functionality
logging.debug("Calling Operator UnsortedSegmentSum")
raise NotImplementedError("UnsortedSegmentSum")
def injectFaultInvertPermutation(inputs):
"Function to call injectFault on InvertPermutation"
# FIXME: Implement this functionality
logging.debug("Calling Operator InvertPermuation")
raise NotImplementedError("InvertPermutation")
def injectFaultApplyGradientDescent(inputs):
"Function to call injectFault on applying gradient descent"
# FIXME: Implement this functionality
logging.debug("Calling Operator ApplyGradientDescent")
raise NotImplementedError("ApplyGradientDescent")
def injectFaultZerosLike(inputs):
"Function to call injectFault on ZerosLike"
# FIXME: Implement this functionality
logging.debug("Calling Operator ZerosLike")
raise NotImplementedError("ZerosLike")
def injectFaultPreventGradient(inputs):
"Function to call injectFault on PreventGradient"
# FIXME: Implement this functionality
logging.debug("Calling Operator PreventGradient")
raise NotImplementedError("PreventGradient")
def injectFaultSSSmcEWL(inputs):
"Function to call injectFault on SoftSparseMax.."
# FIXME: Implement this functionality
logging.debug("Calling Operator SoftSparseMax")
raise NotImplementedError("SoftSparseMax")
def injectFaultAll(a):
"Function to call injectFault on All operation"
# FIXME: Implement this functionality
# Not clear what this does - TF doc is silent about this
logging.debug("Calling Operator All")
raise NotImplementedError("All")
def injectFaultAssert(a):
"Function to call injectFault on Assert operation"
# FIXME: Implement this functionality
logging.debug("Calling Operator Assert")
raise NotImplementedError("Assert")
def injectFaultLess(a):
"Function to call injectFault on Less operation"
# FIXME: Implement this functionality
logging.debug("Calling Operator Less")
raise NotImplementedError("Less")
def injectFaultFSRHOP(a):
"Function to call Inject fault on FertileResource Op"
# FIXME: Implement this functionality
logging.debug("Calling Operator FSRHOP")
raise NotImplementedError("FSRHOP")
def injectFaultL2Loss(a):
"Function to call Inject fault on L2Loss operation"
# FIXME: Implement this functionality
logging.debug("Calling Operator L2Loss")
raise NotImplementedError("L2Loss")
def injectFaultApplyMomentum(a):
"Function to call Inject fault on ApplyMomentum operation"
# FIXME: Implement this functionality
logging.debug("Calling Operator ApplyMomentum")
raise NotImplementedError("ApplyMomentum")
def injectFaultAssignAdd(a):
"Function to call Inject fault on AssignAdd operation"
# FIXME: Implement this functionality
logging.debug("Calling Operator AssignAdd")
raise NotImplementedError("AssignAdd")
def injectFaultSqueeze(a):
"Function to call injectFault on Squeeze"
# FIXME: Implement this functionality
logging.debug("Calling Operator Squeeze")
raise NotImplementedError("Squeeze")
##### End of unimplemented functions ###################
# This is the generic "Catch-all" function - it should be last
# It takes a variable number of arguments in the inputs array
def injectFaultGeneric(*inputs):
"Generic Function to call fault injection on each input and zero it out"
outputs = []
#logging.debug("Calling generic fiFunc on " + str(inputs))
# Perturb the input and add it to the outpus
# FIXME: Should we NOT actually do the operation as well ??
# For now, we don't do any injection at all at this function
for input in inputs:
outputs.append( input )
#if logReturn: logging.debug("\tReturning " + str(outputs))
return outputs
# End of injectFault operations
# The functions in this table are the ones defined above
# FIXME: These are fairly repetitive, so perhaps generate them automatically
# Also, maybe these should be sorted alphabetically - this is getting quite big
opTable = {
"NoOp" : injectFaultNoop, # First operation
"Add": injectFaultAdd,
"Sub": injectFaultSub,
"Mul": injectFaultMul,
"Square" : injectFaultSquare,
"Assign" : injectFaultAssign,
"Identity": injectFaultIdentity,
"Range": injectFaultRange,
"Rank": injectFaultRank,
"Sum" : injectFaultSum,
"Shape": injectFaultShape,
"Fill": injectFaultFill,
"Size": injectFaultSize,
"FloorMod" : injectFaultFloorMod,
"DynamicStitch" : injectFaultDynamicStitch,
"Maximum" : injectFaultMaximum,
"Max" : injectFaultMaximum, # FIXME: Not sure if Max is a synonymn of Maximum or a new operation
"Minimum" : injectFaultMinimum,
"Min" : injectFaultMinimum, # FIXME: Not sure if Min is a synonymn of Minimum or a new operation
"FloorDiv" : injectFaultFloorDiv,
"Reshape" : injectFaultReshape,
"OneHot": injectFaultOneHot,
"Tile" : injectFaultTile,
"ConcatV2" : injectFaultConcatV2,
"ConcatOffset" : injectFaultConcatOffset,
"BiasAdd" : injectFaultBiasAdd,
"Split" : injectFaultSplit,
"Sigmoid" : injectFaultSigmoid,
"Tanh" : injectFaultTanh,
"Softmax" : injectFaultSoftmax,
"SoftmaxCrossEntropyWithLogits" : injectFaultSoftmaxCEWL,
"Pack" : injectFaultPack,
"Slice" : injectFaultSlice,
"StridedSlice" : injectFaultStridedSlice,
"BroadcastGradientArgs" : injectFaultBroadcastGA,
"Neg" : injectFaultNeg,
"Pow" : injectFaultPow,
"Abs" : injectFaultAbs,
"Unpack": injectFaultUnpack,
"Unstack": injectFaultUnstack,
"MatMul" : injectFaultMatMul,
"ArgMax" : injectFaultArgMax,
"ArgMin" : injectFaultArgMin,
"Equal" : injectFaultEqual,
"NotEqual" : injectFaultNotEqual,
"LessEqual" : injectFaultLessEqual,
"GreaterEqual" : injectFaultGreaterEqual,
"TruncatedNormal" : injectFaultTruncatedNormal,
"Conv2D" : injectFaultConv2D,
"Relu" : injectFaultRelu,
"MaxPool" : injectFaultMaxPool,
"RandomUniform" : injectFaultRandomUniform,
"RandomUniformInt" : injectFaultRandomUniformInt,
"RandomStandardNormal" : injectFaultRandomStandardNormal,
"Floor" : injectFaultFloor,
"Rsqrt" : injectFaultRsqrt,
"Log" : injectFaultLog,
"RefSwitch" : injectFaultRefSwitch,
"NearestNeighbors" : injectFaultNN,
"Prod" : injectFaultProd,
"Squeeze" : injectFaultSqueeze,
"Unique" : injectFaultUnique,
"Reciprocal" : injectFaultReciprocal,
"ScatterAdd" : injectFaultScatterAdd,
"ReluGrad" : injectFaultReluGrad,
"MaxPoolGrad" : injectFaultMaxPoolGrad,
"TanhGrad" : injectFaultTanhGrad,
"SigmoidGrad" : injectFaultSigmoidGrad,
"BiasAddGrad" : injectFaultBiasAddGrad,
"ShapeN" : injectFaultShapeN,
"AddN" : injectFaultAddN,
"Conv2DBackpropInput" : injectFaultConv2DBackprop,
"Conv2DBackpropFilter" : injectFaultConv2DBackprop,
"ApplyAdam" : injectFaultApplyAdam,
"Select" : injectFaultSelect,
"Switch" : injectFaultSwitch,
"Merge" : injectFaultMerge,
"Transpose" : injectFaultTranspose,
"Gather" : injectFaultGather,
"UnsortedSegmentSum" : injectFaultUnsortedSegmentSum,
"InvertPermutation" : injectFaultInvertPermutation,
# Casts are treated differently, so don't add them to this table ! See createInjectFaultCast
# "Cast" : injectFaultCast,
"Mean" : injectFaultMean,
"Count_nonzero" : injectFaultCountNonZero,
"RealDiv" : injectFaultRealDiv,
"Greater" : injectFaultGreater,
"ApplyGradientDescent" : injectFaultApplyGradientDescent,
"ZerosLike" : injectFaultZerosLike,
"PreventGradient" : injectFaultPreventGradient,
"ExpandDims" : injectFaultExpandDims,
"SparseSoftmaxCrossEntropyWithLogits" : injectFaultSSSmcEWL,
"All" : injectFaultAll,
"Assert" : injectFaultAssert,
"Less" : injectFaultLess,
"FertileStatsResourceHandleOp" : injectFaultFSRHOP,
"L2Loss" : injectFaultL2Loss,
"ApplyMomentum" : injectFaultApplyMomentum,
"AssignAdd" : injectFaultAssignAdd,
"LRN" : injectFaultLRN,
"Elu" : injectFaultELU,
"Unknown": injectFaultGeneric # Last operation
# "Unknown": None # For debugging purposes
}
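# Note (added commentary, not part of the original source): TensorFI.py is assumed
# to resolve each TensorFlow operation to its injection function by looking up the
# op's type string in opTable, falling back to the "Unknown" entry, e.g.
# fiFunc = opTable.get(op.type, opTable["Unknown"])
# "Cast" is handled separately through createInjectFaultCast because it must
# remember the target dtype (see above).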
| 34.922449 | 105 | 0.736746 | [
"MIT"
] | PhuongLe/PilotNet | TensorFI/injectFault.py | 42,780 | Python |
import glob
import yaml
import os
from generators import intermediate_files
from schema import cleaner
# This script takes all ECS and custom fields already loaded, and lets users
# filter out the ones they don't need.
def filter(fields, subset_file_globs, out_dir):
subsets = load_subset_definitions(subset_file_globs)
for subset in subsets:
subfields = extract_matching_fields(fields, subset['fields'])
intermediate_files.generate(subfields, os.path.join(out_dir, 'ecs', 'subset', subset['name']), False)
merged_subset = combine_all_subsets(subsets)
if merged_subset:
fields = extract_matching_fields(fields, merged_subset)
return fields
def combine_all_subsets(subsets):
'''Merges N subsets into one. Strips top level 'name' and 'fields' keys as well as non-ECS field options since we can't know how to merge those.'''
merged_subset = {}
for subset in subsets:
strip_non_ecs_options(subset['fields'])
merge_subsets(merged_subset, subset['fields'])
return merged_subset
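# Illustrative example (not from the original file): merging
# {'base': {'fields': {'@timestamp': {}}}}
# with
# {'base': {'fields': '*'}}
# yields {'base': {'fields': '*'}} - a '*' wildcard absorbs explicit field lists.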
def load_subset_definitions(file_globs):
if not file_globs:
return []
subsets = []
for f in eval_globs(file_globs):
raw = load_yaml_file(f)
subsets.append(raw)
if not subsets:
raise ValueError('--subset specified, but no subsets found in {}'.format(file_globs))
return subsets
def load_yaml_file(file_name):
with open(file_name) as f:
return yaml.safe_load(f.read())
def eval_globs(globs):
'''Accepts an array of glob patterns or file names, returns the array of actual files'''
all_files = []
for g in globs:
new_files = glob.glob(g)
if len(new_files) == 0:
warn("{} did not match any files".format(g))
else:
all_files.extend(new_files)
return all_files
# You know, for silent tests
def warn(message):
print(message)
ecs_options = ['fields', 'enabled', 'index']
def strip_non_ecs_options(subset):
for key in subset:
subset[key] = {x: subset[key][x] for x in subset[key] if x in ecs_options}
if 'fields' in subset[key] and isinstance(subset[key]['fields'], dict):
strip_non_ecs_options(subset[key]['fields'])
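# Illustrative example (not from the original file): only 'fields', 'enabled' and
# 'index' survive, so a subset entry like
# {'base': {'fields': '*', 'custom_note': 'x'}} (custom_note is a hypothetical option)
# is reduced in place to {'base': {'fields': '*'}}.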
def merge_subsets(a, b):
'''Merges field subset definitions together. The b subset is merged into the a subset. Assumes that subsets have been stripped of non-ecs options.'''
for key in b:
if key not in a:
a[key] = b[key]
elif 'fields' in a[key] and 'fields' in b[key]:
if b[key]['fields'] == '*':
a[key]['fields'] = '*'
elif isinstance(a[key]['fields'], dict) and isinstance(b[key]['fields'], dict):
merge_subsets(a[key]['fields'], b[key]['fields'])
elif 'fields' in a[key] or 'fields' in b[key]:
raise ValueError("Subsets unmergeable: 'fields' found in key '{}' in only one subset".format(key))
# If both subsets have enabled set to False, this will leave enabled: False in the merged subset
# Otherwise, enabled is removed and is implicitly true
if a[key].get('enabled', True) or b[key].get('enabled', True):
a[key].pop('enabled', None)
# Same logic from 'enabled' applies to 'index'
if a[key].get('index', True) or b[key].get('index', True):
a[key].pop('index', None)
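# Illustrative example (not from the original file):
# a = {'host': {'fields': {'name': {}}}}
# merge_subsets(a, {'host': {'fields': {'ip': {}}}})
# # a is now {'host': {'fields': {'name': {}, 'ip': {}}}}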
def extract_matching_fields(fields, subset_definitions):
'''Removes fields that are not in the subset definition. Returns a copy without modifying the input fields dict.'''
retained_fields = {x: fields[x].copy() for x in subset_definitions}
for key, val in subset_definitions.items():
retained_fields[key]['field_details'] = fields[key]['field_details'].copy()
for option in val:
if option != 'fields':
if 'intermediate' in retained_fields[key]['field_details']:
retained_fields[key]['field_details']['intermediate'] = False
retained_fields[key]['field_details'].setdefault(
'description', 'Intermediate field included by adding option with subset')
retained_fields[key]['field_details']['level'] = 'custom'
cleaner.field_cleanup(retained_fields[key])
retained_fields[key]['field_details'][option] = val[option]
# If the field in the schema has a 'fields' key, we expect a 'fields' key in the subset
if 'fields' in fields[key]:
if 'fields' not in val:
raise ValueError("'fields' key expected, not found in subset for {}".format(key))
elif isinstance(val['fields'], dict):
retained_fields[key]['fields'] = extract_matching_fields(fields[key]['fields'], val['fields'])
elif val['fields'] != "*":
raise ValueError("Unexpected value '{}' found in 'fields' key".format(val['fields']))
# If the field in the schema does not have a 'fields' key, there should not be a 'fields' key in the subset
elif 'fields' in val:
raise ValueError("'fields' key not expected, found in subset for {}".format(key))
return retained_fields
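# Overall flow of filter() above (derived from the code): each subset definition is
# written out as an intermediate file under <out_dir>/ecs/subset/<name>, the subsets
# are then merged, and only the fields matched by that merged definition are returned;
# when no --subset globs are given, the field set passes through unchanged.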
| 42.056452 | 153 | 0.640652 | [
"Apache-2.0"
] | 6un9-h0-Dan/ecs | scripts/schema/subset_filter.py | 5,215 | Python |
# Copyright 2020 The Kubric Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
import functools
from typing import Tuple, Dict, Union
import bidict
import bpy
import munch
from kubric import core
logger = logging.getLogger(__name__)
class Blender:
def __init__(self, scene: core.Scene):
self.objects_to_blend = bidict.bidict()
self.ambient_node = None
self.ambient_hdri_node = None
self.illum_mapping_node = None
self.bg_node = None
self.bg_hdri_node = None
self.bg_mapping_node = None
self.clear_and_reset() # as blender has a default scene on load
# the ray-tracing engine is set here because it affects the availability of some features
bpy.context.scene.render.engine = "CYCLES"
self.add(scene)
self.set_up_scene_shading()
bpy.context.scene.cycles.use_adaptive_sampling = True # speeds up rendering
bpy.context.scene.view_layers[0].cycles.use_denoising = True # improves the output quality
def add(self, obj: core.Asset):
if obj in self.objects_to_blend:
return self.objects_to_blend[obj]
blender_obj, setters = add_object(obj)
# set the name of the object to the UID
blender_obj.name = obj.uid
# if it has a rotation mode, then make sure it is set to quaternions
if hasattr(blender_obj, "rotation_mode"):
blender_obj.rotation_mode = "QUATERNION"
# remember object association
self.objects_to_blend[obj] = blender_obj
# if object is an actual Object (eg. not a Scene, or a Material)
# then ensure that it is linked into (used by) the current scene collection
if isinstance(blender_obj, bpy.types.Object):
collection = bpy.context.scene.collection.objects
if blender_obj not in collection.values():
collection.link(blender_obj)
for name, setter in setters.items():
setter.mapping = self.objects_to_blend
# recursively add sub-assets
value = getattr(obj, name)
if isinstance(value, core.Asset):
value = self.add(value)
# Initialize values
setter(munch.Munch(owner=obj, new=value, type="init"))
# Link values
obj.observe(setter, names=[name])
obj.destruction_callbacks.append(Destructor([blender_obj]))
obj.keyframe_callbacks.append(Keyframer(setters))
return blender_obj
def get_blender_object(self, obj: core.Object3D) -> bpy.types.Object:
if isinstance(obj, bpy.types.Object):
return obj
elif isinstance(obj, core.Object3D):
return self.objects_to_blend[obj]
else:
raise ValueError("Not a valid object {}".format(obj))
def clear_and_reset(self):
bpy.ops.wm.read_factory_settings(use_empty=True)
bpy.context.scene.world = bpy.data.worlds.new("World")
def set_up_exr_output(self, path):
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
# clear existing nodes
for node in tree.nodes:
tree.nodes.remove(node)
# the render node has outputs for all the rendered layers
render_node = tree.nodes.new(type="CompositorNodeRLayers")
# create a new FileOutput node
out_node = tree.nodes.new(type="CompositorNodeOutputFile")
# set the format to EXR (multilayer)
out_node.format.file_format = "OPEN_EXR_MULTILAYER"
out_node.base_path = str(path) # output directory
layers = ["Image", "Depth", "Vector", "UV", "Normal", "CryptoObject00"]
out_node.file_slots.clear()
for l in layers:
out_node.file_slots.new(l)
links.new(render_node.outputs.get(l), out_node.inputs.get(l))
def set_up_scene_shading(self):
bpy.context.scene.world.use_nodes = True
tree = bpy.context.scene.world.node_tree
links = tree.links
# clear the tree
for node in tree.nodes.values():
tree.nodes.remove(node)
# create nodes
out_node = tree.nodes.new(type="ShaderNodeOutputWorld")
out_node.location = 1100, 0
mix_node = tree.nodes.new(type="ShaderNodeMixShader")
mix_node.location = 900, 0
lightpath_node = tree.nodes.new(type="ShaderNodeLightPath")
lightpath_node.location = 700, 350
self.ambient_node = tree.nodes.new(type="ShaderNodeBackground")
self.ambient_node.inputs["Color"].default_value = (0., 0., 0., 1.)
self.ambient_node.location = 700, 0
self.bg_node = tree.nodes.new(type="ShaderNodeBackground")
self.bg_node.inputs["Color"].default_value = (0., 0., 0., 1.)
self.bg_node.location = 700, -120
links.new(lightpath_node.outputs.get("Is Camera Ray"), mix_node.inputs.get("Fac"))
links.new(self.ambient_node.outputs.get("Background"), mix_node.inputs[1])
links.new(self.bg_node.outputs.get("Background"), mix_node.inputs[2])
links.new(mix_node.outputs.get("Shader"), out_node.inputs.get("Surface"))
# create nodes for HDRI images, but leave them disconnected until set_ambient_illumination or set_background
coord_node = tree.nodes.new(type="ShaderNodeTexCoord")
self.bg_mapping_node = tree.nodes.new(type="ShaderNodeMapping")
self.bg_mapping_node.location = 200, 200
self.bg_hdri_node = tree.nodes.new(type="ShaderNodeTexEnvironment")
self.bg_hdri_node.location = 400, 200
links.new(coord_node.outputs.get("Generated"), self.bg_mapping_node.inputs.get("Vector"))
links.new(self.bg_mapping_node.outputs.get("Vector"), self.bg_hdri_node.inputs.get("Vector"))
#links.new(bg_hdri_node.outputs.get("Color"), self.bg_node.inputs.get("Color"))
self.illum_mapping_node = tree.nodes.new(type="ShaderNodeMapping")
self.illum_mapping_node.location = 200, -200
self.ambient_hdri_node = tree.nodes.new(type="ShaderNodeTexEnvironment")
self.ambient_hdri_node.location = 400, -200
links.new(coord_node.outputs.get("Generated"), self.illum_mapping_node.inputs.get("Vector"))
links.new(self.illum_mapping_node.outputs.get("Vector"), self.ambient_hdri_node.inputs.get("Vector"))
# links.new(illum_hdri_node.outputs.get("Color"), self.illum_node.inputs.get("Color"))
def set_ambient_light(self, hdri_filepath=None, color=(0., 0., 0., 1.0), hdri_rotation=(0., 0., 0.)):
tree = bpy.context.scene.world.node_tree
links = tree.links
if hdri_filepath is None:
# disconnect incoming links from hdri node (if any)
for link in self.ambient_node.inputs["Color"].links:
links.remove(link)
self.ambient_node.inputs["Color"].default_value = color
else:
# ensure hdri_node is connected
links.new(self.ambient_hdri_node.outputs.get("Color"), self.ambient_node.inputs.get("Color"))
self.ambient_hdri_node.image = bpy.data.images.load(hdri_filepath, check_existing=True)
self.illum_mapping_node.inputs.get("Rotation").default_value = hdri_rotation
def set_background(self, hdri_filepath=None, color=(0., 0., 0., 1.0), hdri_rotation=(0., 0., 0.)):
tree = bpy.context.scene.world.node_tree
links = tree.links
if hdri_filepath is None:
# disconnect incoming links from hdri node (if any)
for link in self.bg_node.inputs["Color"].links:
links.remove(link)
self.bg_node.inputs["Color"].default_value = color
else:
# ensure hdri_node is connected
links.new(self.bg_hdri_node.outputs.get("Color"), self.bg_node.inputs.get("Color"))
self.bg_hdri_node.image = bpy.data.images.load(hdri_filepath, check_existing=True)
self.bg_mapping_node.inputs.get("Rotation").default_value = hdri_rotation
def activate_render_passes(self):
view_layer = bpy.context.scene.view_layers[0]
view_layer.use_pass_vector = True # flow
view_layer.use_pass_uv = True # UV
view_layer.use_pass_normal = True # surface normals
view_layer.cycles.use_pass_crypto_object = True # segmentation
view_layer.cycles.pass_crypto_depth = 2
def set_size(self, width: int, height: int):
bpy.context.scene.render.resolution_x = width
bpy.context.scene.render.resolution_y = height
def save_state(self, path: Union[pathlib.Path, str], filename: str = "scene.blend",
pack_textures: bool = True):
path = pathlib.Path(path)
path.mkdir(parents=True, exist_ok=True)
if pack_textures:
bpy.ops.file.pack_all() # embed all textures into the blend file
bpy.ops.wm.save_mainfile(filepath=str(path / filename))
def render(self, path: Union[pathlib.Path, str]):
self.activate_render_passes()
path = pathlib.Path(path)
bpy.context.scene.render.filepath = str(path / "images" / "frame_")
self.set_up_exr_output(path / "exr" / "frame_")
bpy.ops.render.render(animation=True, write_still=True)
# ########## Functions to import kubric objects into blender ###########
@functools.singledispatch
def add_object(obj: core.Asset) -> Tuple[bpy.types.Object, Dict[str, core.AttributeSetter]]:
raise NotImplementedError()
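# Note (added commentary, not in the original source): each @add_object.register
# handler below returns a pair (blender_object, setters); `setters` maps kubric
# attribute names such as "position" to core.AttributeSetter objects, which
# Blender.add wires up as trait observers so that changing the kubric asset
# updates the corresponding Blender property.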
@add_object.register(core.Cube)
def _add_object(obj: core.Cube):
bpy.ops.mesh.primitive_cube_add()
cube = bpy.context.active_object
return cube, {
'position': core.AttributeSetter(cube, 'location'),
'quaternion': core.AttributeSetter(cube, 'rotation_quaternion'),
'scale': core.AttributeSetter(cube, 'scale'),
'material': core.AttributeSetter(cube, 'active_material')
}
@add_object.register(core.Sphere)
def _add_object(obj: core.Sphere):
bpy.ops.mesh.primitive_ico_sphere_add(subdivisions=5)
bpy.ops.object.shade_smooth()
sphere = bpy.context.active_object
return sphere, {
'position': core.AttributeSetter(sphere, 'location'),
'quaternion': core.AttributeSetter(sphere, 'rotation_quaternion'),
'scale': core.AttributeSetter(sphere, 'scale'),
'material': core.AttributeSetter(sphere, 'active_material')
}
@add_object.register(core.FileBasedObject)
def _add_object(obj: core.FileBasedObject):
# TODO: support other file-formats
bpy.ops.import_scene.obj(filepath=str(obj.render_filename),
axis_forward=obj.front, axis_up=obj.up)
assert len(bpy.context.selected_objects) == 1
blender_obj = bpy.context.selected_objects[0]
setters = {
"position": core.AttributeSetter(blender_obj, "location"),
"quaternion": core.AttributeSetter(blender_obj, "rotation_quaternion"),
"scale": core.AttributeSetter(blender_obj, "scale"),
"material": core.AttributeSetter(blender_obj, "active_material")
}
return blender_obj, setters
@add_object.register(core.DirectionalLight)
def _add_object(obj: core.DirectionalLight):
sun = bpy.data.lights.new(obj.uid, "SUN")
sun_obj = bpy.data.objects.new(obj.uid, sun)
setters = {
"position": core.AttributeSetter(sun_obj, "location"),
"quaternion": core.AttributeSetter(sun_obj, "rotation_quaternion"),
"scale": core.AttributeSetter(sun_obj, "scale"),
"color": core.AttributeSetter(sun, "color"),
"intensity": core.AttributeSetter(sun, "energy")}
return sun_obj, setters
@add_object.register(core.RectAreaLight)
def _add_object(obj: core.RectAreaLight):
area = bpy.data.lights.new(obj.uid, "AREA")
area_obj = bpy.data.objects.new(obj.uid, area)
setters = {
"position": core.AttributeSetter(area_obj, "location"),
"quaternion": core.AttributeSetter(area_obj, "rotation_quaternion"),
"scale": core.AttributeSetter(area_obj, "scale"),
"color": core.AttributeSetter(area, "color"),
"intensity": core.AttributeSetter(area, "energy"),
"width": core.AttributeSetter(area, "size"),
"height": core.AttributeSetter(area, "size_y")}
return area_obj, setters
@add_object.register(core.PointLight)
def _add_object(obj: core.PointLight):
area = bpy.data.lights.new(obj.uid, "POINT")
area_obj = bpy.data.objects.new(obj.uid, area)
setters = {
"position": core.AttributeSetter(area_obj, "location"),
"quaternion": core.AttributeSetter(area_obj, "rotation_quaternion"),
"scale": core.AttributeSetter(area_obj, "scale"),
"color": core.AttributeSetter(area, "color"),
"intensity": core.AttributeSetter(area, "energy")}
return area_obj, setters
@add_object.register(core.PerspectiveCamera)
def _add_object(obj: core.PerspectiveCamera):
camera = bpy.data.cameras.new(obj.uid)
camera.type = "PERSP"
camera_obj = bpy.data.objects.new(obj.uid, camera)
setters = {
"position": core.AttributeSetter(camera_obj, "location"),
"quaternion": core.AttributeSetter(camera_obj, "rotation_quaternion"),
"scale": core.AttributeSetter(camera_obj, "scale"),
"focal_length": core.AttributeSetter(camera, "lens"),
"sensor_width": core.AttributeSetter(camera, "sensor_width")}
return camera_obj, setters
@add_object.register(core.OrthographicCamera)
def _add_object(obj: core.OrthographicCamera):
camera = bpy.data.cameras.new(obj.uid)
camera.type = 'ORTHO'
camera_obj = bpy.data.objects.new(obj.uid, camera)
setters = {
'position': core.AttributeSetter(camera_obj, 'location'),
'quaternion': core.AttributeSetter(camera_obj, 'rotation_quaternion'),
'scale': core.AttributeSetter(camera_obj, 'scale'),
'orthographic_scale': core.AttributeSetter(camera, 'ortho_scale')}
return camera_obj, setters
@add_object.register(core.PrincipledBSDFMaterial)
def _add_object(obj: core.PrincipledBSDFMaterial):
mat = bpy.data.materials.new(obj.uid)
mat.use_nodes = True
bsdf_node = mat.node_tree.nodes["Principled BSDF"]
setters = {
"color": core.AttributeSetter(bsdf_node.inputs["Base Color"], "default_value"),
"roughness": core.AttributeSetter(bsdf_node.inputs["Roughness"], "default_value"),
"metallic": core.AttributeSetter(bsdf_node.inputs["Metallic"], "default_value"),
"specular": core.AttributeSetter(bsdf_node.inputs["Specular"], "default_value"),
"specular_tint": core.AttributeSetter(bsdf_node.inputs["Specular Tint"], "default_value"),
"ior": core.AttributeSetter(bsdf_node.inputs["IOR"], "default_value"),
"transmission": core.AttributeSetter(bsdf_node.inputs["Transmission"], "default_value"),
"transmission_roughness": core.AttributeSetter(bsdf_node.inputs["Transmission Roughness"],
"default_value"),
"emission": core.AttributeSetter(bsdf_node.inputs["Emission"], "default_value"),
}
return mat, setters
@add_object.register(core.MeshChromeMaterial)
def _add_object(obj: core.MeshChromeMaterial):
# --- Create node-based material
mat = bpy.data.materials.new("Chrome")
mat.use_nodes = True
tree = mat.node_tree
tree.nodes.remove(tree.nodes["Principled BSDF"]) # remove the default shader
# --- Specify nodes
LW = tree.nodes.new("ShaderNodeLayerWeight")
LW.inputs[0].default_value = 0.7
CR = tree.nodes.new("ShaderNodeValToRGB")
CR.color_ramp.elements[0].position = 0.9
CR.color_ramp.elements[1].position = 1
CR.color_ramp.elements[1].color = (0, 0, 0, 1)
GLO = tree.nodes.new("ShaderNodeBsdfGlossy")
# --- link nodes
tree.links.new(LW.outputs[1], CR.inputs["Fac"])
tree.links.new(CR.outputs["Color"], GLO.inputs["Color"])
tree.links.new(GLO.outputs[0], tree.nodes["Material Output"].inputs["Surface"])
setters = {
"color": core.AttributeSetter(CR.color_ramp.elements[0], "color"),
"roughness": core.AttributeSetter(GLO.inputs[1], "default_value")
}
return mat, setters
@add_object.register(core.FlatMaterial)
def _add_object(obj: core.FlatMaterial):
# --- Create node-based material
mat = bpy.data.materials.new('Holdout')
mat.use_nodes = True
tree = mat.node_tree
tree.nodes.remove(tree.nodes['Principled BSDF']) # remove the default shader
output_node = tree.nodes['Material Output']
# This material is constructed from three different shaders:
# 1. if holdout=False then emission_node is responsible for giving the object a uniform color
# 2. if holdout=True, then the holdout_node is responsible for making the object transparent
# 3. if indirect_visibility=False then transparent_node makes the node invisible for indirect
# effects such as shadows or reflections
light_path_node = tree.nodes.new(type="ShaderNodeLightPath")
holdout_node = tree.nodes.new(type="ShaderNodeHoldout")
transparent_node = tree.nodes.new(type="ShaderNodeBsdfTransparent")
holdout_mix_node = tree.nodes.new(type="ShaderNodeMixShader")
indirect_mix_node = tree.nodes.new(type="ShaderNodeMixShader")
overall_mix_node = tree.nodes.new(type="ShaderNodeMixShader")
emission_node = tree.nodes.new(type="ShaderNodeEmission")
tree.links.new(transparent_node.outputs['BSDF'], indirect_mix_node.inputs[1])
tree.links.new(emission_node.outputs['Emission'], indirect_mix_node.inputs[2])
tree.links.new(emission_node.outputs['Emission'], holdout_mix_node.inputs[1])
tree.links.new(holdout_node.outputs['Holdout'], holdout_mix_node.inputs[2])
tree.links.new(light_path_node.outputs['Is Camera Ray'], overall_mix_node.inputs['Fac'])
tree.links.new(indirect_mix_node.outputs['Shader'], overall_mix_node.inputs[1])
tree.links.new(holdout_mix_node.outputs['Shader'], overall_mix_node.inputs[2])
tree.links.new(overall_mix_node.outputs['Shader'], output_node.inputs['Surface'])
return mat, {
'color': core.AttributeSetter(emission_node.inputs['Color'], 'default_value'),
'holdout': core.AttributeSetter(holdout_mix_node.inputs['Fac'], 'default_value'),
'indirect_visibility': core.AttributeSetter(indirect_mix_node.inputs['Fac'], 'default_value'),
}
@add_object.register(core.Scene)
def _add_scene(obj: core.Scene):
blender_scene = bpy.context.scene
setters = {
"frame_start": core.AttributeSetter(blender_scene, "frame_start"),
"frame_end": core.AttributeSetter(blender_scene, "frame_end"),
"frame_rate": core.AttributeSetter(blender_scene.render, "fps"),
"resolution": core.AttributeSetter(blender_scene.render, ["resolution_x", "resolution_y"]),
"camera": core.AttributeSetter(blender_scene, "camera")
}
return blender_scene, setters
# ########### ########### ########### ########### ########### ########### ########### ##########
class Destructor:
def __init__(self, blender_objects):
self.blender_objects = blender_objects
def __call__(self, owner=None):
for obj in self.blender_objects:
try:
if isinstance(obj, bpy.types.Object):
bpy.data.objects.remove(obj, do_unlink=True)
elif isinstance(obj, bpy.types.Material):
bpy.data.materials.remove(obj, do_unlink=True)
except ReferenceError:
pass # In this case the object is already gone
class Keyframer:
def __init__(self, setters):
self.setters = setters
def __call__(self, owner, member, frame):
setter = self.setters[member]
setter.target_obj.keyframe_insert(setter.target_name, frame=frame)
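# --- A small illustrative sketch (not part of the original module): combining the
# --- two helpers above with the (blender_obj, setters) pair returned by one of the
# --- registered add_object handlers. `obj` stands for any registered core type and
# --- is hypothetical here.
#
#   blender_obj, setters = add_object(obj)
#   keyframer = Keyframer(setters)
#   keyframer(obj, "position", frame=0)     # key the current location at frame 0
#   destructor = Destructor([blender_obj])
#   destructor()                            # later: remove the Blender datablocks again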
| 39.900826 | 112 | 0.713546 | [
"Apache-2.0"
] | Xtuden-com/kubric | kubric/renderer/blender.py | 19,312 | Python |
from bs4 import BeautifulSoup
from spotipy.oauth2 import SpotifyOAuth
import requests
import spotipy
SPOTIFY_CLIENT_ID = "YOUR_SPOTIFY_CLIENT_ID"
SPOTIFY_CLIENT_SECRET = "YOUR_SPOTIFY_CLIENT_SECRET"
sp = spotipy.Spotify(
auth_manager=SpotifyOAuth(
client_id=SPOTIFY_CLIENT_ID,
client_secret=SPOTIFY_CLIENT_SECRET,
redirect_uri="https://www.example.com",
scope="playlist-modify-private",
show_dialog=True,
cache_path="token.txt"
)
)
user_id = sp.current_user()["id"]
travel_date = input("Which year do you want to travel to? Type the date in this format YYYY-MM-DD:")
travel_year = travel_date[:4]
billboard_url = f"https://www.billboard.com/charts/hot-100/{travel_date}"
response = requests.get(billboard_url)
soup = BeautifulSoup(response.text, "html.parser")
song_names = [name.getText() for name in soup.select(".chart-element__information__song")]
song_artists = [name.getText() for name in soup.select(".chart-element__information__artist")]
songs = [{
"artist": song_artists[i],
"name": song_names[i]
} for i in range(len(song_artists))]
print(songs)
song_urls = []
for song in songs:
sp_song = sp.search(f"track:{song['name']} year:{travel_year}", type="track")
try:
url = sp_song["tracks"]["items"][0]["uri"]
song_urls.append(url)
except IndexError:
print(f"{song['name']} doesn't exist in Spotify. Skipped.")
playlist = sp.user_playlist_create(user=user_id, name=f"{travel_date} Billboard 100", public=False)
sp.playlist_add_items(playlist_id=playlist["id"], items=song_urls)
| 31.294118 | 100 | 0.718672 | [
"MIT"
] | gabrielmdr/100-days-of-code | projects/day046/music-time-machine.py | 1,596 | Python |
"""
config_objects.py
By: John-Michael O'Brien
Date: 7/25/2020
Data structures that define and load configuration information for the
wallpaper watcher.
"""
from typing import List, Dict, Optional
from dataclasses import dataclass, field
import jsons
import yaml
@dataclass
class SubredditConfig():
""" Holds any per-subreddit configuration. That's nothing right now. """
@dataclass
class MultiredditConfig():
""" Holds information necessary to access a multireddit """
user: str
multi: str
@dataclass
class SourcesConfig():
""" Holds information about image sources """
subreddits: Optional[Dict[str, Optional[SubredditConfig]]]
multis: Optional[Dict[str, MultiredditConfig]]
@dataclass
class Size():
""" Holds a size """
width: int
height: int
aspect_ratio: float = field(init=False, repr=False)
def __post_init__(self):
self.aspect_ratio = float(self.width) / float(self.height)
@dataclass
class TargetConfig():
""" Holds information about a save target """
path: str
size: Size
sources: List[str]
allow_nsfw: bool = True
@dataclass
class WallpaperConfig():
""" Loads and holds the configuration for wallpaperwatcher. """
aspect_ratio_tolerance: float
max_downloads: int
update_interval: int
sources: SourcesConfig
targets: Dict[str, TargetConfig]
@staticmethod
def from_file(filename: str) -> "WallpaperConfig":
""" Creates a WallpaperConfig from a YAML file """
with open(filename, "r") as input_file:
return jsons.load(yaml.load(input_file, Loader=yaml.SafeLoader), WallpaperConfig)
@dataclass
class RedditAuthInfo():
""" Holds Reddit Authentication Values """
client_id: str
client_secret: str
@staticmethod
def from_file(filename: str) -> "RedditAuthInfo":
""" Creates a RedditAuthInfo from a YAML file """
with open(filename, "r") as input_file:
auth = jsons.load(yaml.load(input_file, Loader=yaml.SafeLoader), RedditAuthInfo)
return auth
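# A minimal usage sketch (not part of the original module). The YAML file names
# below are placeholders; the snippet relies only on the from_file() helpers
# defined above.
if __name__ == "__main__":
    wallpaper_cfg = WallpaperConfig.from_file("wallpaper_config.yaml")
    reddit_auth = RedditAuthInfo.from_file("reddit_auth.yaml")
    print(list(wallpaper_cfg.targets.keys()))
    print(reddit_auth.client_id)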
| 27.346667 | 93 | 0.695758 | [
"MIT"
] | JimTheCactus/RedditWallpaperWatcher | config_objects.py | 2,051 | Python |
# Copyright 2021 Research Institute of Systems Planning, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from caret_analyze import Application, Lttng
from caret_analyze.plot import message_flow
from ros2caret.verb import VerbExtension
class MessageFlowVerb(VerbExtension):
def add_arguments(self, parser, cli_name):
parser.add_argument(
'-t', '--trace_dir', dest='trace_dir',
help='trace dir', required=True)
parser.add_argument(
'-a', '--architecture_path', dest='architecture_path',
help='architecture', required=True)
parser.add_argument(
'-o', '--output_path', dest='output_path',
help='output path to the message flow file', required=True)
parser.add_argument(
'-p', '--path_name', dest='path_name',
help='path name of trace points to be visualized', required=True)
parser.add_argument(
'-g', '--granularity', dest='granularity', default=None,
help='granularity of trace points to be visualized')
def main(self, *, args):
lttng = Lttng(args.trace_dir, force_conversion=True)
app = Application(args.architecture_path, 'yaml', lttng)
path = app.path[args.path_name]
message_flow(path, export_path=args.output_path,
granularity=args.granularity)
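# A hypothetical invocation sketch (not from the original file). The exact command
# prefix depends on how ros2caret registers its verbs, and every path below is a
# placeholder:
#
#   ros2 caret message_flow -t ./trace_dir -a architecture.yaml \
#       -o message_flow.html -p target_path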
| 38.52 | 77 | 0.680685 | [
"Apache-2.0"
] | tier4/ros2caret | ros2caret/verb/message_flow.py | 1,926 | Python |
# -*- coding: utf-8 -*-
"""
unifonicnextgen
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class GetScheduledMessageResponse(object):
"""Implementation of the 'Get Scheduled Message response' model.
    Gets details of the specified scheduled message. If MessageID is specified,
    only one message is returned; otherwise all messages (paginated) are
    queried.
Attributes:
success (bool): The request sent successfully
message (string): The Error message if its false, Null if its true
error_code (string): the error code if there is any
data (object): Message id, Time created, correlation id., status,
number of units, cost, balance, Recipient
"""
# Create a mapping from Model property names to API property names
_names = {
"success":'success',
"message":'message',
"error_code":'errorCode',
"data":'data'
}
def __init__(self,
success=None,
message=None,
error_code=None,
data=None):
"""Constructor for the GetScheduledMessageResponse class"""
# Initialize members of the class
self.success = success
self.message = message
self.error_code = error_code
self.data = data
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
success = dictionary.get('success')
message = dictionary.get('message')
error_code = dictionary.get('errorCode')
data = dictionary.get('data')
# Return an object of this model
return cls(success,
message,
error_code,
data)
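# A small illustrative example (not part of the generated file): building an
# instance from an API-style dictionary with the helper above. The payload
# values are placeholders.
#
#   payload = {"success": True, "message": None, "errorCode": None,
#              "data": {"MessageID": "123"}}
#   response = GetScheduledMessageResponse.from_dictionary(payload)
#   assert response.success is True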
| 29.56962 | 84 | 0.581336 | [
"MIT"
] | masaar/unifonic_python_sdk | unifonicnextgen/models/get_scheduled_message_response.py | 2,336 | Python |
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
from version import __version__
setup(
name='opencensus-ext-pymongo',
version=__version__, # noqa
author='OpenCensus Authors',
author_email='census-developers@googlegroups.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
description='OpenCensus pymongo Integration',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=[
'opencensus >= 0.7.13, < 1.0.0',
'pymongo >= 3.1.0',
],
extras_require={},
license='Apache-2.0',
packages=find_packages(exclude=('tests',)),
namespace_packages=[],
url='https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-pymongo', # noqa: E501
zip_safe=False,
)
| 37.444444 | 127 | 0.66815 | [
"Apache-2.0"
] | census-instrumentation/opencensus-python | contrib/opencensus-ext-pymongo/setup.py | 2,022 | Python |
# encoding: utf-8
import json
from .. import (
DatabaseTest,
sample_data
)
from lxml import etree
from core.coverage import CoverageFailure
from core.model import Contributor, Identifier, Measurement
from core.metadata_layer import *
from oclc.classify import (
IdentifierLookupCoverageProvider,
OCLCClassifyXMLParser,
MockOCLCClassifyAPI,
)
class MockParser(OCLCClassifyXMLParser):
def __init__(self):
self.call_count = 0
self.called_with = []
def parse(self, tree, metadata):
self.call_count += 1
self.called_with = metadata
return self.called_with
class MockParserSingle(MockParser):
def initial_look_up(self, tree):
return 2, []
class MockParserMulti(MockParser):
def initial_look_up(self, tree):
results = []
owi_numbers = ["48446512", "48525129"]
for number in owi_numbers:
data = IdentifierData(Identifier.OCLC_WORK, number)
results.append(data)
return 4, results
class MockProvider(IdentifierLookupCoverageProvider):
def __init__(self, collection):
self.apply_called_with = []
self.apply_call_count = 0
super(MockProvider, self).__init__(collection)
def _single(self, tree, metadata):
self.called_with = dict(tree=tree, metadata=metadata)
metadata.data_source = DataSource.OCLC
return metadata
def _multiple(self, owi_data, metadata):
self.called_with = dict(owi_data=owi_data, metadata=metadata)
return metadata
def _apply(self, metadata):
self.apply_called_with = metadata.primary_identifier
self.apply_call_count += 1
class MockProviderSingle(MockProvider):
def _get_tree(self, **kwargs):
xml = sample_data("single_work_with_isbn.xml", "oclc_classify")
return etree.fromstring(xml, parser=etree.XMLParser(recover=True))
class MockProviderMulti(MockProvider):
def _get_tree(self, **kwargs):
xml = sample_data("multi_work_with_owis.xml", "oclc_classify")
return etree.fromstring(xml, parser=etree.XMLParser(recover=True))
class TestIdentifierLookupCoverageProvider(DatabaseTest):
SINGLE_ISBN = "9781620281932"
MULTI_ISBN = "0345391837"
def _tree(self, type):
if type == "single":
return MockProviderSingle(self._default_collection)._get_tree(isbn=self.SINGLE_ISBN)
else:
return MockProviderMulti(self._default_collection)._get_tree(isbn=self.MULTI_ISBN)
def _id(self, type):
if type == "single":
return self._identifier(Identifier.ISBN, self.SINGLE_ISBN)
else:
return self._identifier(Identifier.ISBN, self.MULTI_ISBN)
def test_process_item_single(self):
# Testing that, when process_item finds out that a document's status code is 2,
# it calls _single, passes in the correct tree and blank metadata object as arguments,
# and returns the original ISBN. Uses mocked versions of _get_tree,
# initial_look_up, and _single.
provider = MockProviderSingle(self._default_collection)
provider.parser = MockParserSingle()
id = self._id("single")
result = provider.process_item(id)
assert etree.tostring(provider.called_with["tree"]) == etree.tostring(provider._get_tree(isbn=self.SINGLE_ISBN))
assert isinstance(provider.called_with["metadata"], Metadata)
assert provider.called_with["metadata"].primary_identifier == id
assert result == id
def test_process_item_multi(self):
# Testing that, when process_item finds out that a document's status code is 4,
# it calls _multiple, passes in the correct OWIs, and
# returns the original ISBN. Uses mocked versions of _get_tree, initial_look_up,
# and _multiple.
provider = MockProviderMulti(self._default_collection)
provider.parser = MockParserMulti()
id = self._id("multi")
result = provider.process_item(id)
assert [x.identifier for x in provider.called_with["owi_data"]] == ["48446512", "48525129"]
assert isinstance(provider.called_with["metadata"], Metadata)
assert provider.called_with["metadata"].primary_identifier == id
assert result == id
def test_process_item_failure(self):
# If the ISBN is not found--i.e. the status code is 102--the provider should throw an error.
api = MockOCLCClassifyAPI(self._db)
api.queue_response(sample_data("isbn_not_found.xml", "oclc_classify"))
provider = IdentifierLookupCoverageProvider(self._default_collection, api=api)
bad_id = self._identifier(Identifier.ISBN, "9781429984171")
failure = provider.process_item(bad_id)
# We asked OCLC about the ISBN...
assert ['http://classify.oclc.org/classify2/Classify?isbn=9781429984171'] == api.requests
# ...but we didn't get anything useful.
assert isinstance(failure, CoverageFailure)
assert failure.exception == "The work with ISBN 9781429984171 was not found."
def test__apply_propagates_replacement_policy(self):
# When IdentifierLookupCoverageProvider applies metadata
# to the database, it uses the replacement policy associated with
# the coverage provider.
class MockMetadata(Metadata):
def apply(self, *args, **kwargs):
self.called_with = (args, kwargs)
metadata = MockMetadata(data_source=DataSource.OCLC,
primary_identifier=self._identifier())
provider = IdentifierLookupCoverageProvider(
self._default_collection, replacement_policy=object()
)
provider._apply(metadata)
args, kwargs = metadata.called_with
assert kwargs['replace'] == provider.replacement_policy
def test__apply_single(self):
# Testing that, in the case of a single-work response, _apply is called with the return value of _single.
provider = MockProviderSingle(self._default_collection)
provider.parser = MockParserSingle()
id = self._id("single")
provider.process_item(id)
result = provider.apply_called_with
assert result.identifier == id.identifier
assert provider.apply_call_count == 1
def test__apply_multiple(self):
# Testing that, even in the case of a multi-work response, _apply is only called once;
# we only want to end up with one Metadata object (and one corresponding edition).
provider = MockProviderMulti(self._default_collection)
provider.parser = MockParserMulti()
id = self._id("multi")
provider.process_item(id)
result = provider.apply_called_with
assert result == id
assert provider.apply_call_count == 1
def test__single(self):
# Testing that _single calls parse, passes in the correct tree and
# identifier as arguments, and returns the resulting value. Uses a mocked
# version of parse.
provider = IdentifierLookupCoverageProvider(self._default_collection)
provider.parser = MockParserSingle()
tree, identifier = self._tree("single"), self._id("single")
metadata = self._blank_metadata(identifier)
provider._single(tree, metadata)
result = provider.parser.called_with
assert (result.primary_identifier.type, result.primary_identifier.identifier) == (Identifier.ISBN, self.SINGLE_ISBN)
def test__multiple(self):
# Testing that _multiple calls parse, passes in the correct OWIs, and
# returns the resulting value. Uses mocked versions of
# initial_look_up (to get the list of OWIs) and parse.
api = MockOCLCClassifyAPI(self._db)
for filename in (
'single_work_48446512.xml',
'single_work_48525129.xml',
):
api.queue_response(sample_data(filename, "oclc_classify"))
provider = IdentifierLookupCoverageProvider(self._default_collection, api=api)
provider.parser = MockParserMulti()
tree, identifier = self._tree("multi"), self._id("multi")
metadata = self._blank_metadata(identifier)
code, owi_data = provider.parser.initial_look_up(tree)
provider._multiple(owi_data, metadata)
result = provider.parser.called_with
# Make sure parse was called twice--once for each of the two OWIs.
assert provider.parser.call_count == 2
assert result.primary_identifier.identifier == self.MULTI_ISBN
assert isinstance(result.primary_identifier, Identifier)
def test__single_with_real_parser(self):
# Testing that calling _single actually returns the correct metadata object.
provider = IdentifierLookupCoverageProvider(self._default_collection)
tree, identifier = self._tree("single"), self._id("single")
metadata = self._blank_metadata(identifier)
result = provider._single(tree, metadata)
assert isinstance(result, Metadata)
assert result._data_source == "OCLC Classify"
assert result.primary_identifier == identifier
self._check_measurements(result.measurements, "single")
[author] = result.contributors
assert isinstance(author, ContributorData)
assert self._get_contributor_info(author) == ("Melville, Herman", "n79006936", "27068555", ["Author"], {"deathDate": "1891", "birthDate": "1819"})
def test__multiple_with_real_parser(self):
# Testing that calling _multiple actually returns the correct metadata object.
api = MockOCLCClassifyAPI(self._db)
for filename in (
'single_work_48446512.xml',
'single_work_48525129.xml',
):
api.queue_response(sample_data(filename, "oclc_classify"))
provider = IdentifierLookupCoverageProvider(
self._default_collection, api=api
)
tree, identifier = self._tree("multi"), self._id("multi")
metadata = self._blank_metadata(identifier)
code, owi_data = provider.parser.initial_look_up(tree)
result = provider._multiple(owi_data, metadata)
# Two requests were made to the mock API -- one for each of the OWIs we had to look up
# while parsing the multi-OWI document.
assert (
[
'http://classify.oclc.org/classify2/Classify?owi=48446512',
'http://classify.oclc.org/classify2/Classify?owi=48525129',
] ==
api.requests
)
# We ended up with a single Metadata object, which contains
# information derived from looking up both OWIs.
assert isinstance(result, Metadata)
assert result._data_source == "OCLC Classify"
assert result.primary_identifier == identifier
# The author info just comes from the first work.
expected_author_info = ("Adams, Douglas", "n80076765", "113230702", ["Author"], {"deathDate": "2001", "birthDate": "1952"})
[author] = result.contributors
author_info = self._get_contributor_info(author)
assert author_info == expected_author_info
# Measurement info is also just from the first work.
self._check_measurements(result.measurements, "multi")
# The subject data is collected from both works. We prove this by making sure
# that the list of Fast identifiers consists of the unique union of the Fast identifiers
# obtained by looking up each of the two <work> tags by its OWI.
[ddc], [lcc], fast = self._get_subjects(result.subjects)
assert ddc.identifier == "823.914"
assert lcc.identifier == "PR6051.D3352"
# We got 5 Fast subject classifications from the first <work> tag:
fast_work_1 = set([
"Dent, Arthur (Fictitious character)",
"Prefect, Ford",
"Interplanetary voyages",
"Interstellar travel",
"Fiction",
])
# And 6 from the second <work> tag, 4 of which overlap with the ones from the first <work> tag:
fast_work_2 = set([
"Dent, Arthur (Fictitious character)",
"Prefect, Ford",
"Interplanetary voyages",
"Interstellar travel",
"Science fiction, English",
"Humorous stories, English",
])
# So, our Metadata object should end up with 7 Fast subject classifications--the 4 shared ones,
# plus 1 unique one from work #1 and 2 unique ones from work #2.
assert len(fast) == 7
fast_subject_names = set([x.name for x in fast])
assert fast_subject_names == fast_work_1.union(fast_work_2)
def _get_subjects(self, subjects):
# Everything in the list of subjects should be a SubjectData object.
        assert len([x for x in subjects if isinstance(x, SubjectData)]) == len(subjects)
# Extract a sublist for each type of classifier.
sublists = [[x for x in subjects if x.type == type] for type in ["DDC", "LCC", "FAST"]]
# There should always be 1 DDC classification and 1 LCC classification.
assert (len(sublists[0]), len(sublists[1])) == (1, 1)
return sublists
def _check_measurements(self, measurements, type):
values = {
"single": {
Measurement.HOLDINGS: 41932,
Measurement.PUBLISHED_EDITIONS: 1
},
"multi": {
Measurement.HOLDINGS: 5976,
Measurement.PUBLISHED_EDITIONS: 160
}
}
assert len(measurements) == 2
[holdings], [editions] = [[x for x in measurements if y in x.quantity_measured] for y in ["holdings", "editions"]]
for m in [holdings, editions]:
assert isinstance(m, MeasurementData)
expected_value = values[type][m.quantity_measured]
assert m.weight == 1
assert m.value == expected_value
def _get_contributor_info(self, contributor):
return (
contributor.sort_name,
contributor.lc,
contributor.viaf,
contributor.roles,
contributor.extra
)
def _blank_metadata(self, identifier):
metadata = Metadata(
data_source=DataSource.OCLC,
primary_identifier=identifier
)
return metadata
| 41.454023 | 154 | 0.661237 | [
"Apache-2.0"
] | NYPL-Simplified/metadata_wrangler | tests/oclc_/test_identifier_lookup_coverage_provider.py | 14,426 | Python |
from django.shortcuts import redirect, render
from django.views.generic import TemplateView
from ..models import Student
class SignUpView(TemplateView):
template_name = 'registration/signup.html'
def home(request):
if request.user.is_authenticated:
if request.user.is_teacher:
return redirect('teachers:quiz_change_list')
else:
return redirect('students:quiz_list')
return render(request, 'classroom/home.html')
def save_github_user(backend, user, response, *args, **kwargs):
if backend.name == 'github':
if not user.is_student:
user.is_student = True
user.save()
student = Student.objects.create(user=user)
# avatar_url = response.get('avatar_url')
            # print(user, response)
| 31.76 | 63 | 0.672544 | [
"MIT"
] | netomo/django-schools | django_school/classroom/views/classroom.py | 794 | Python |
import pytorchresearch as ptr
import torch
import torchvision
if __name__ == "__main__":
# transform for data
transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)
)
])
# dataloaders
trainset = torchvision.datasets.CIFAR100(root='./data/datasets', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=32,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR100(root='./data/datasets', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=32,
shuffle=False, num_workers=2)
# model specific stuff
model = torchvision.models.MobileNetV2(num_classes=100)
if torch.cuda.is_available():
model = model.cuda()
optimizer = torch.optim.SGD(
params=model.parameters(),
lr=0.001,
momentum=0.9
)
criterion = torch.nn.CrossEntropyLoss()
# MAGIC GOES HERE
research = ptr.ModelResearch(
research_path='.temp',
research_scheme=[
ptr.ModelConfigurationItem(),
ptr.CurrentIterationItem(print_end=' ', iteration_modulo=10),
ptr.LossPrintItem(iteration_modulo=10),
ptr.LossVisualizationItem(iteration_modulo=10)
],
model=model,
optimizer=optimizer,
criterion=criterion,
watch_test=False
)
research.start_research_session(
trainloader, testloader, epochs=1, iteration_modulo=20)
| 33.053571 | 80 | 0.600216 | [
"MIT"
] | VoIlAlex/pytorchresearch | docs/example_1/back.py | 1,851 | Python |
from django.db.models import Q
from .base import EntityType
TYPE_VIDEO = "video"
class VideoEntity(EntityType):
name = TYPE_VIDEO
@classmethod
def filter_date_lte(cls, qs, dt):
return qs.filter(publication_date__lte=dt)
@classmethod
def filter_date_gte(cls, qs, dt):
return qs.filter(publication_date__gte=dt)
@classmethod
def filter_search(cls, qs, query):
from tournesol.models import Entity
# Filtering in a nested queryset is necessary here, to be able to annotate
# each entity without duplicated scores, due to the m2m field 'tags'.
return qs.filter(pk__in=Entity.objects.filter(
Q(name__icontains=query) |
Q(description__icontains=query) |
Q(tags__name__icontains=query)
))
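# Illustrative use (assumes a configured Django app so that the Entity model
# from tournesol.models is importable):
#
#   from tournesol.models import Entity
#   qs = VideoEntity.filter_search(Entity.objects.all(), "climate")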
| 26.966667 | 82 | 0.673671 | [
"CC0-1.0"
] | iamnkc/tournesol | backend/tournesol/entities/video.py | 809 | Python |
import logging
import random
import time
from jobcontrol.exceptions import SkipBuild
def job_simple_echo(*args, **kwargs):
return (args, kwargs)
_cached_words = None
def _get_words():
global _cached_words
if _cached_words is not None:
return _cached_words
try:
with open('/usr/share/dict/words') as fp:
_cached_words = [x.strip() for x in fp]
except:
_cached_words = []
return _cached_words
def _capfirst(s):
return s[0].upper() + s[1:]
def _random_paragraph(size=10):
return _capfirst(' '.join(random.sample(_get_words(), size)))
def _log_random(logger):
classes = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
logging.CRITICAL)
for num in xrange(random.randint(0, 30)):
logger.log(random.choice(classes),
_random_paragraph(random.randint(10, 20)))
def testing_job(progress_steps=None, retval=None, fail=False, skip=False,
log_messages=None, step_duration=0):
"""
Job used for testing purposes.
:param progress_steps:
A list of tuples: ``(<group_name>, <steps>)``, where "group_name"
is a tuple of name "levels", "steps" an integer representing how
many steps should that level have.
Progress reports will be sent in randomized order.
:param retval:
The return value for the job.
:param fail:
Whether this job should fail.
:param skip:
Whether this job should be skipped.
:param log_messages:
A list of tuples: ``(level, message)``
:param step_duration:
The time to sleep between steps, in milliseconds.
"""
from jobcontrol.globals import execution_context
logger = logging.getLogger('jobcontrol.utils.testing_job')
log_messages = list(log_messages or [])
if progress_steps is None:
progress_steps = [(None, 10)]
totals = {}
counters = {}
progress_report_items = []
for name, steps in progress_steps:
if isinstance(name, list):
# Safe YAML doesn't have tuples, but names must be tuples
name = tuple(name)
if not (name is None or isinstance(name, tuple)):
raise TypeError("Name must be a tuple or None")
for i in xrange(steps):
progress_report_items.append(name)
totals[name] = steps
counters[name] = 0
random.shuffle(progress_report_items)
sleep_time = step_duration * 1.0 / 1000
def report_progress(name, cur, tot, status=None):
app = execution_context.current_app
app.report_progress(
group_name=name, current=cur, total=tot,
status_line=status)
def _should_fail():
return random.randint(0, len(progress_report_items)) == 0
for item in progress_report_items:
counters[item] += 1
report_progress(item, counters[item], totals[item],
'Doing action {0} [{1}/{2}]'
.format(item, counters[item], totals[item]))
if len(log_messages):
lev, msg = log_messages.pop(0)
logger.log(lev, msg)
if fail and _should_fail():
raise RuntimeError(
'This is a simulated exception in the middle of the loop')
if skip and _should_fail():
raise SkipBuild(
'This is a simulated skip in the middle of the loop')
if sleep_time:
time.sleep(sleep_time)
if skip:
# Make sure the job gets skipped
raise SkipBuild('This build should be skipped!')
if fail:
# Make sure the job fails
raise RuntimeError('This is a simulated exception')
return retval
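# A minimal sketch of the keyword arguments testing_job accepts (illustrative
# values only, not part of the original module). The job is normally scheduled
# through jobcontrol rather than called directly, because it reads the current
# app from jobcontrol.globals.execution_context.
EXAMPLE_TESTING_JOB_KWARGS = {
    'progress_steps': [(None, 5), (('download', 'files'), 10)],
    'log_messages': [(logging.WARNING, 'a simulated warning')],
    'step_duration': 10,  # milliseconds of sleep between steps
    'retval': 'done',
}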
def job_with_logging():
logger = logging.getLogger('jobcontrol.utils.testing.job_with_logging')
logger.setLevel(logging.DEBUG)
logger.debug('This is a debug message')
logger.info('This is an info message')
logger.warning('This is a warning message')
logger.error('This is an error message')
logger.critical('This is a critical message')
try:
raise ValueError('Foobar')
except:
logger.exception('This is an exception message')
def job_with_tracer_log():
from jobcontrol.globals import execution_context
logger = logging.getLogger(__name__)
logger.info('Message from job={0}, build={1}'
.format(execution_context.job_id,
execution_context.build_id))
pass
def job_failing_once():
"""
This job will fail exactly once; retry will be successful
"""
from jobcontrol.globals import current_job
exec_count = len(list(current_job.iter_runs()))
if exec_count <= 1:
# This is the first run
raise RuntimeError("Simulating failure")
return exec_count
def job_echo_config(*args, **kwargs):
"""
Simple job, "echoing" back the current configuration.
"""
from jobcontrol.globals import current_job, current_build
return {
'args': args,
'kwargs': kwargs,
'build_id': current_build.id,
'job_id': current_job.id,
'dependencies': current_build.config['dependencies'],
'config': current_build.config,
}
class RecordingLogHandler(logging.Handler):
"""Log handler that records messages"""
def __init__(self):
super(RecordingLogHandler, self).__init__()
self._messages = []
def flush(self):
pass # Nothing to flush!
def emit(self, record):
self._messages.append(record)
def print_messages(self):
from nicelog.formatters import ColorLineFormatter
formatter = ColorLineFormatter(
show_date=False, show_function=False, show_filename=False,
message_inline=True)
for msg in self._messages:
print(formatter.format(msg))
def clear_messages(self):
self._messages = []
class NonSerializableObject(object):
__slots__ = ['foo', 'bar']
def __init__(self):
self.foo = 'foo'
self.bar = 'bar'
class NonSerializableException(Exception):
def __init__(self):
super(NonSerializableException, self).__init__()
self.nso = NonSerializableObject()
def job_returning_nonserializable():
return NonSerializableObject()
def job_raising_nonserializable():
raise NonSerializableException()
def job_creating_temp_file():
import os
import tempfile
fd, name = tempfile.mkstemp(text=True)
with os.fdopen(fd, 'w') as fp:
fp.write('Hello, world!\n')
return name
def cleanup_temp_file(build):
import os
os.unlink(build.retval)
| 25.80695 | 75 | 0.637193 | [
"Apache-2.0"
] | rshk/jobcontrol | jobcontrol/utils/testing.py | 6,684 | Python |
"""
Django settings for HedgeFund project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
STATIC_ROOT = BASE_DIR / 'staticfiles'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-v)1&-+#yg2xdd$jddc#$z-c!1gu%v*%_^*_&ppw@pm^x%w-^*8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'sscapital',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'HedgeFund.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'HedgeFund.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR / 'static'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
LOGIN_REDIRECT_URL = '/administrator'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedStaticFilesStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 26.741007 | 91 | 0.708905 | [
"MIT"
] | atreyasinha/Heptra-Capital | HedgeFund/base.py | 3,717 | Python |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2013 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.jid import JID
from sleekxmpp.xmlstream import ElementBase, register_stanza_plugin
class NoSave(ElementBase):
name = 'x'
namespace = 'google:nosave'
plugin_attrib = 'google_nosave'
interfaces = set(['value'])
def get_value(self):
return self._get_attr('value', '') == 'enabled'
def set_value(self, value):
self._set_attr('value', 'enabled' if value else 'disabled')
class NoSaveQuery(ElementBase):
name = 'query'
namespace = 'google:nosave'
plugin_attrib = 'google_nosave'
interfaces = set()
class Item(ElementBase):
name = 'item'
namespace = 'google:nosave'
plugin_attrib = 'item'
plugin_multi_attrib = 'items'
interfaces = set(['jid', 'source', 'value'])
def get_value(self):
return self._get_attr('value', '') == 'enabled'
def set_value(self, value):
self._set_attr('value', 'enabled' if value else 'disabled')
def get_jid(self):
return JID(self._get_attr('jid', ''))
def set_jid(self, value):
self._set_attr('jid', str(value))
def get_source(self):
return JID(self._get_attr('source', ''))
def set_source(self, value):
self._set_attr('source', str(value))
register_stanza_plugin(NoSaveQuery, Item)
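# A brief illustrative snippet (not part of the original plugin): exercising the
# interface accessors defined above on a standalone Item stanza.
#
#   item = Item()
#   item['jid'] = JID('user@example.com')
#   item['value'] = True        # serialized as value="enabled"
#   print(item)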
| 24.566667 | 67 | 0.650611 | [
"BSD-3-Clause"
] | Dunedan/SleekXMPP | sleekxmpp/plugins/google/nosave/stanza.py | 1,474 | Python |
from sklearn.model_selection import cross_val_score
import pandas as pd
import matplotlib.pyplot as plt
def CrossValidationFolds_Traversal(estimator, vdataset):
"""
    Arguments:
    - estimator = classifier of the model
    - vdataset = vehicle dataset
    This function computes the accuracy score with the
    Cross Validation Score for each KFold with K from 2 to 10
    Output:
    returns a matrix containing each value of K with its corresponding performance score
"""
X = vdataset.drop(["Class", "Class_code"], axis=1)
y = vdataset["Class_code"]
scores = []
matrix = pd.DataFrame(columns=["KFold", "Accuracy"])
for i in range(2, 11): ##Kfold 2 to 10
score = cross_val_score(estimator, X, y, cv=i, scoring="accuracy")
scores.append(score.mean())
matrix = matrix.append(
{"KFold": i, "Accuracy": (score.mean() * 100),}, ignore_index=True,
)
return matrix
def Visulaize_CrossValidationFolds_Traversal(matrix):
"""
    Argument:
    - matrix: DataFrame named matrix
    A line plot is drawn for each KFold value with its respective performance score.
    Output:
    - plots the line graph
"""
ax = plt.gca()
matrix.plot(
kind="line",
x="KFold",
y="Accuracy",
color="red",
marker="o",
markerfacecolor="blue",
markersize=12,
ax=ax,
)
    plt.title("Line plot of no. of KFolds with their corresponding performance scores\n")
plt.ylabel("Accuracy\n")
plt.xlabel("\nNo of KFolds")
plt.show()
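# A minimal usage sketch (assumes a vehicles dataframe with "Class" and
# "Class_code" columns, e.g. the vehicles dataset used in this project; the CSV
# path and the classifier choice below are placeholders).
if __name__ == "__main__":
    from sklearn.linear_model import LogisticRegression

    vdataset = pd.read_csv("vehicles.csv")
    clf = LogisticRegression(max_iter=1000)
    matrix = CrossValidationFolds_Traversal(clf, vdataset)
    Visulaize_CrossValidationFolds_Traversal(matrix)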
| 28.535714 | 86 | 0.617647 | [
"MPL-2.0"
] | Bolaji61/PRESC | dev/shiza16/Calibration plot/CrossValidationFold_Traversal.py | 1,598 | Python |
#!/usr/bin/env python
import os
import sys
def addPath(rel_path, prepend=False):
""" Adds a directory to the system python path, either by append (doesn't
override default or globally installed package names) or by prepend
(overrides default/global package names).
"""
path = lambda *paths: os.path.abspath(
os.path.join(os.path.dirname(__file__), *paths)) + '/'
if prepend:
return sys.path.insert(0, path(rel_path))
return sys.path.append(path(rel_path))
# Allow us to not include `djoauth2example` when importing subapps.
addPath('djoauth2example', prepend=True)
# Use the local version of the `djoauth2` library; very useful for manually
# testing the full series of client-server interactions while developing.
addPath('..', prepend=True)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djoauth2example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 34.586207 | 79 | 0.735793 | [
"MIT"
] | Locu/djoauth2 | example/manage.py | 1,003 | Python |
# TODO: your agent here!
import numpy as np
from agents.actor import Actor
from agents.critic import Critic
from agents.buffer import ReplayBuffer
from agents.ou_noise import OUNoise
class DDPG():
"""Reinforcement Learning agent that learns using DDPG."""
def __init__(self, task):
self.task = task
self.state_size = task.state_size
self.action_size = task.action_size
self.action_low = task.action_low
self.action_high = task.action_high
# Actor (Policy) Model
self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
# Critic (Value) Model
self.critic_local = Critic(self.state_size, self.action_size)
self.critic_target = Critic(self.state_size, self.action_size)
# Initialize target model parameters with local model parameters
self.critic_target.model.set_weights(self.critic_local.model.get_weights())
self.actor_target.model.set_weights(self.actor_local.model.get_weights())
# Noise process
self.exploration_mu = 0
self.exploration_theta = 0.15
self.exploration_sigma = 0.2
self.noise = OUNoise(self.action_size, self.exploration_mu, self.exploration_theta, self.exploration_sigma)
# Replay memory
self.buffer_size = 100000
self.batch_size = 64
self.memory = ReplayBuffer(self.buffer_size, self.batch_size)
# Algorithm parameters
self.gamma = 0.99 # discount factor
self.tau = 0.001 # for soft update of target parameters
# Reward tracking
self.reward_total = 0
self.best_reward = -np.Inf
self.score = 0
self.count = 0
def reset_episode_vars(self):
self.noise.reset()
state = self.task.reset()
self.last_state = state
# Reward tracking
self.reward_total = 0
self.score = 0
self.count = 0
self.max_z = 0
return state
def step(self, action, reward, next_state, done):
# Save experience / reward
self.memory.add(self.last_state, action, reward, next_state, done)
# Track the reward: Compute the total reward for this episode
self.reward_total += reward
self.count += 1
if done:
self.score = self.reward_total / self.count if self.count > 0 else 0
            self.best_reward = max(self.best_reward, self.reward_total)
# Learn, if enough samples are available in memory
if len(self.memory) > self.batch_size:
experiences = self.memory.sample()
self.learn(experiences)
# Roll over last state and action
self.last_state = next_state
def act(self, state):
"""Returns actions for given state(s) as per current policy."""
state = np.reshape(state, [-1, self.state_size])
action = self.actor_local.model.predict(state)[0]
return list(action + self.noise.sample()) # add some noise for exploration
def learn(self, experiences):
"""Update policy and value parameters using given batch of experience tuples."""
# Convert experience tuples to separate arrays for each element (states, actions, rewards, etc.)
states = np.vstack([e.state for e in experiences if e is not None])
actions = np.array([e.action for e in experiences if e is not None]).astype(np.float32).reshape(-1, self.action_size)
rewards = np.array([e.reward for e in experiences if e is not None]).astype(np.float32).reshape(-1, 1)
dones = np.array([e.done for e in experiences if e is not None]).astype(np.uint8).reshape(-1, 1)
next_states = np.vstack([e.next_state for e in experiences if e is not None])
# Get predicted next-state actions and Q values from target models
# Q_targets_next = critic_target(next_state, actor_target(next_state))
actions_next = self.actor_target.model.predict_on_batch(next_states)
Q_targets_next = self.critic_target.model.predict_on_batch([next_states, actions_next])
# Compute Q targets for current states and train critic model (local)
Q_targets = rewards + self.gamma * Q_targets_next * (1 - dones)
self.critic_local.model.train_on_batch(x=[states, actions], y=Q_targets)
# Train actor model (local)
action_gradients = np.reshape(self.critic_local.get_action_gradients([states, actions, 0]), (-1, self.action_size))
self.actor_local.train_fn([states, action_gradients, 1]) # custom training function
# Soft-update target models
self.soft_update(self.critic_local.model, self.critic_target.model)
self.soft_update(self.actor_local.model, self.actor_target.model)
def soft_update(self, local_model, target_model):
"""Soft update model parameters."""
local_weights = np.array(local_model.get_weights())
target_weights = np.array(target_model.get_weights())
assert len(local_weights) == len(target_weights), "Local and target model parameters must have the same size"
new_weights = self.tau * local_weights + (1 - self.tau) * target_weights
        target_model.set_weights(new_weights)
| 43.096 | 125 | 0.668832 | [
"MIT"
] | GabrielTourinho/dlnd-teach-a-quadcopter-how-to-fly | home/agents/agent.py | 5,387 | Python |
# -*- coding: utf-8 -*-
"""PageParser tests."""
import httpx # noqa: F401
import pytest
from core.database import ProductGinoModel
from core.services import get_product_name
pytestmark = [pytest.mark.asyncio, pytest.mark.api_full]
API_URL_PREFIX = "/api/v1"
@pytest.fixture
def no_css_response() -> bytes:
minified_html = b'<!DOCTYPE html><html lang="en"><head> <meta charset="UTF-8"> <title>Title</title></head><body><h1>TestProduct1</h1></body></html>' # noqa: E501
return minified_html
@pytest.fixture
def css_response() -> bytes:
    minified_html = b'<!DOCTYPE html><html lang="en"><head> <meta charset="UTF-8"> <title>Title</title></head><body><h1 class="b3a8">adidas Gazelle</h1></body></html>'  # noqa: E501
return minified_html
@pytest.fixture
def no_h1_response() -> bytes:
    minified_html = b'<!DOCTYPE html><html lang="en"><head> <meta charset="UTF-8"> <title>Title</title></head><body><h2 class="b3a8">bad</h2></body></html>'  # noqa: E501
return minified_html
@pytest.fixture
def h3_before_h1_response() -> bytes:
minified_html = b'<!DOCTYPE html><html lang="en"><head> <meta charset="UTF-8"> <title>Title</title></head><body><h3>Fake</h3><h1>TestProduct3</h1></body></html>' # noqa: E501
return minified_html
@pytest.fixture
def h11_before_h1_response() -> bytes:
minified_html = b'<!DOCTYPE html><html lang="en"><head> <meta charset="UTF-8"> <title>Title</title></head><body><h11>Fake</h11><h1>TestProduct4</h1></body></html>' # noqa: E501
return minified_html
@pytest.fixture
async def products_1(single_admin):
return await ProductGinoModel.create(
user_id=single_admin.id, name="test", url="https://devyatkin.dev/1"
)
class TestPageParser:
"""Page Parser core tests."""
async def test_no_css_h1(self, httpx_mock, no_css_response):
test_url = "https://no_css_h1"
httpx_mock.add_response(data=no_css_response, url=test_url)
response = await get_product_name(url=test_url)
assert response == "TestProduct1"
async def test_css_h1_one_chunk(self, httpx_mock, css_response):
test_url = "https://css_h1"
httpx_mock.add_response(data=css_response, url=test_url)
response = await get_product_name(url=test_url, chunk_size=500)
assert response == "adidas Gazelle"
@pytest.mark.asyncio
async def test_css_h1_several_chunks(self, httpx_mock, css_response):
test_url = "https://css_h1"
httpx_mock.add_response(data=css_response, url=test_url)
response = await get_product_name(url=test_url, chunk_size=100)
assert response == "adidas Gazelle"
@pytest.mark.asyncio
async def test_no_h1(self, httpx_mock, no_h1_response):
test_url = "https://no_h1"
httpx_mock.add_response(data=no_h1_response, url=test_url)
response = await get_product_name(url=test_url)
assert response is None
@pytest.mark.asyncio
async def test_h3_before_h1(self, httpx_mock, h3_before_h1_response):
test_url = "https://h3_before_h1"
httpx_mock.add_response(data=h3_before_h1_response, url=test_url)
response = await get_product_name(url=test_url)
assert response == "TestProduct3"
@pytest.mark.skip(reason="valid spec indicates that headers can only be h1-h6.")
@pytest.mark.asyncio
async def test_h11_before_h1(self, httpx_mock, h11_before_h1_response):
test_url = "https://h11_before_h1"
httpx_mock.add_response(data=h11_before_h1_response, url=test_url)
response = await get_product_name(url=test_url)
assert response == "TestProduct4"
@pytest.mark.asyncio
async def test_bad_response(self, httpx_mock):
test_url = "https://bad"
httpx_mock.add_response(status_code=500)
response = await get_product_name(url=test_url)
assert response is None
class TestApi:
"""PageParser api response tests."""
API_URL = f"{API_URL_PREFIX}/extract-product-title"
async def test_unauthenticated_input(self, snapshot, backend_app):
resp = await backend_app.post(
f"{self.API_URL}",
json={"data": {"attributes": {"url": "https://css_h1.io"}}},
)
assert resp.status_code == 401
async def test_bad_input(self, snapshot, backend_app, single_admin_auth_headers):
resp = await backend_app.post(
f"{self.API_URL}",
json={"data": {"attributes": {"url": "bad-url"}}},
headers=single_admin_auth_headers,
)
assert resp.status_code == 422
snapshot.assert_match(resp.json())
async def test_good_input(
self, httpx_mock, snapshot, backend_app, css_response, single_admin_auth_headers
):
test_url = "https://css_h1.io"
httpx_mock.add_response(data=css_response, url=test_url)
response = await backend_app.post(
f"{self.API_URL}",
json={"data": {"attributes": {"url": test_url}}},
headers=single_admin_auth_headers,
)
assert response.status_code == 200
snapshot.assert_match(response.json())
async def test_bad_url(
self, httpx_mock, snapshot, backend_app, single_admin_auth_headers
):
test_url = "https://bad.io"
httpx_mock.add_response(status_code=500)
response = await backend_app.post(
f"{self.API_URL}",
json={"data": {"attributes": {"url": test_url}}},
headers=single_admin_auth_headers,
)
assert response.status_code == 200
snapshot.assert_match(response.json())
async def test_existing_product(
self, snapshot, backend_app, products_1, css_response, single_admin_auth_headers
):
test_url = products_1.url
response = await backend_app.post(
f"{self.API_URL}",
json={"data": {"attributes": {"url": test_url}}},
headers=single_admin_auth_headers,
)
assert response.status_code == 200
snapshot.assert_match(response.json())
| 35.497076 | 181 | 0.668369 | [
"MIT"
] | devalv/yawm | backend/tests/test_page_parser.py | 6,070 | Python |
print('running module1.py...')
a = 100
| 13 | 30 | 0.641026 | [
"Apache-2.0"
] | AadityaGupta/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | python-tuts/0-beginner/8-Modules_Packages_Namespaces/module1.py | 39 | Python |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from urllib import parse
from kazoo.client import KazooClient
from kazoo.security import make_digest_acl
from arch.api.utils import file_utils
from arch.api.utils.core_utils import get_lan_ip
class CenterConfig(object):
SERVERS = None
USE_ACL = False
ZK_USERNAME = 'fate'
ZK_PASSWORD = 'fate'
@staticmethod
def get_settings(path, servings_zk_path=None, use_zk=False, hosts=None, server_conf_path=''):
if servings_zk_path and use_zk:
return CenterConfig.get_servings_from_zookeeper(servings_zk_path, hosts)
return CenterConfig.get_settings_from_file(path, server_conf_path)
@staticmethod
def get_settings_from_file(path, server_conf_path):
server_conf = file_utils.load_json_conf(server_conf_path)
data = server_conf
for k in path.split('/')[1:]:
data = data.get(k, None)
return data
@staticmethod
def get_zk(hosts):
if CenterConfig.USE_ACL:
default_acl = make_digest_acl(CenterConfig.ZK_USERNAME, CenterConfig.ZK_PASSWORD, all=True)
zk = KazooClient(hosts=hosts, default_acl=[default_acl], auth_data=[("digest", "{}:{}".format(
CenterConfig.ZK_USERNAME, CenterConfig.ZK_PASSWORD))])
else:
zk = KazooClient(hosts=hosts)
return zk
@staticmethod
def get_servings_from_zookeeper(path, hosts):
try:
zk = CenterConfig.get_zk(hosts)
zk.start()
nodes = zk.get_children(path)
CenterConfig.SERVERS = nodes_unquote(nodes)
zk.stop()
return CenterConfig.SERVERS
except Exception as e:
raise Exception('loading servings node failed from zookeeper: {}'.format(e))
@staticmethod
def init(hosts, use_configuation_center, fate_flow_zk_path, fate_flow_port, model_transfer_path):
if use_configuation_center:
zk = CenterConfig.get_zk(hosts)
zk.start()
model_host = 'http://{}:{}{}'.format(get_lan_ip(), fate_flow_port, model_transfer_path)
fate_flow_zk_path = '{}/{}'.format(fate_flow_zk_path, parse.quote(model_host, safe=' '))
try:
zk.create(fate_flow_zk_path, makepath=True)
except:
pass
zk.stop()
def nodes_unquote(nodes):
urls = [parse.unquote(node) for node in nodes]
servings = []
for url in urls:
try:
servings.append(url.split('/')[2])
except:
pass
    return servings
| 35.561798 | 106 | 0.661927 | [
"Apache-2.0"
] | AustinNeverPee/FedRec | fate_flow/utils/setting_utils.py | 3,165 | Python |
import os
from easydict import EasyDict
import torch
# architecture
from basicts.archs.DCRNN_arch import DCRNN
# runner
from basicts.runners.DCRNN_runner import DCRNNRunner
from basicts.data.base_dataset import BaseDataset
from basicts.metrics.mae import masked_mae
from basicts.metrics.mape import masked_mape
from basicts.metrics.rmse import masked_rmse
from basicts.losses.losses import masked_l1_loss
from basicts.utils.serialization import load_adj
CFG = EasyDict()
resume = False # DCRNN does not allow to load parameters since it creates parameters in the first iteration
if not resume:
import random
_ = random.randint(-1e6, 1e6)
# ================= general ================= #
CFG.DESCRIPTION = 'DCRNN model configuration'
CFG.RUNNER = DCRNNRunner
CFG.DATASET_CLS = BaseDataset
CFG.DATASET_NAME = "PEMS07"
CFG.DATASET_TYPE = 'Traffic speed'
CFG._ = _
CFG.GPU_NUM = 1
CFG.METRICS = {
"MAE": masked_mae,
"RMSE": masked_rmse,
"MAPE": masked_mape
}
# ================= environment ================= #
CFG.ENV = EasyDict()
CFG.ENV.SEED = 1
CFG.ENV.CUDNN = EasyDict()
CFG.ENV.CUDNN.ENABLED = True
# ================= model ================= #
CFG.MODEL = EasyDict()
CFG.MODEL.NAME = 'DCRNN'
CFG.MODEL.ARCH = DCRNN
adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
CFG.MODEL.PARAM = {
"cl_decay_steps" : 2000,
"horizon" : 12,
"input_dim" : 2,
"max_diffusion_step": 2,
"num_nodes" : 883,
"num_rnn_layers" : 2,
"output_dim" : 1,
"rnn_units" : 64,
"seq_len" : 12,
"adj_mx" : [torch.tensor(i).cuda() for i in adj_mx],
"use_curriculum_learning": True
}
CFG.MODEL.FROWARD_FEATURES = [0, 1] # traffic speed, time in day
CFG.MODEL.TARGET_FEATURES = [0] # traffic speed
# ================= optim ================= #
CFG.TRAIN = EasyDict()
CFG.TRAIN.LOSS = masked_l1_loss
CFG.TRAIN.OPTIM = EasyDict()
CFG.TRAIN.OPTIM.TYPE = "Adam"
CFG.TRAIN.OPTIM.PARAM= {
"lr":0.003,
"eps":1e-3
}
CFG.TRAIN.LR_SCHEDULER = EasyDict()
CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
CFG.TRAIN.LR_SCHEDULER.PARAM= {
"milestones":[80],
"gamma":0.3
}
# ================= train ================= #
# CFG.TRAIN.CLIP = 5
CFG.TRAIN.NUM_EPOCHS = 200
CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
'checkpoints',
'_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
)
CFG.TRAIN.SETUP_GRAPH = True
# train data
CFG.TRAIN.DATA = EasyDict()
CFG.TRAIN.NULL_VAL = 0.0
## read data
CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
## dataloader args, optional
CFG.TRAIN.DATA.BATCH_SIZE = 64
CFG.TRAIN.DATA.PREFETCH = False
CFG.TRAIN.DATA.SHUFFLE = True
CFG.TRAIN.DATA.NUM_WORKERS = 2
CFG.TRAIN.DATA.PIN_MEMORY = False
# ================= validate ================= #
CFG.VAL = EasyDict()
CFG.VAL.INTERVAL = 1
# validating data
CFG.VAL.DATA = EasyDict()
## read data
CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
## dataloader args, optional
CFG.VAL.DATA.BATCH_SIZE = 64
CFG.VAL.DATA.PREFETCH = False
CFG.VAL.DATA.SHUFFLE = False
CFG.VAL.DATA.NUM_WORKERS = 2
CFG.VAL.DATA.PIN_MEMORY = False
# ================= test ================= #
CFG.TEST = EasyDict()
CFG.TEST.INTERVAL = 1
# validating data
CFG.TEST.DATA = EasyDict()
## read data
CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
## dataloader args, optional
CFG.TEST.DATA.BATCH_SIZE = 64
CFG.TEST.DATA.PREFETCH = False
CFG.TEST.DATA.SHUFFLE = False
CFG.TEST.DATA.NUM_WORKERS = 2
CFG.TEST.DATA.PIN_MEMORY = False

# Source: zezhishao/BasicTS | basicts/options/DCRNN/DCRNN_PEMS07.py | Apache-2.0

import pylab as pl
import numpy as np
from os import path
from numpy import abs, linspace, sin, pi, int16
import pandas
def plotfft(s, fmax, doplot=False):
""" This functions computes the fft of a signal, returning the frequency
and their magnitude values.
Parameters
----------
s: array-like
the input signal.
fmax: int
the sampling frequency.
doplot: boolean
a variable to indicate whether the plot is done or not.
Returns
-------
f: array-like
the frequency values (xx axis)
fs: array-like
the amplitude of the frequency values (yy axis)
"""
    fs = abs(np.fft.fft(s))
    half = len(s) // 2  # integer division: the linspace count and slice indices must be ints
    f = linspace(0, fmax / 2, half)
    if doplot:
        pl.plot(f[1:half], fs[1:half])
    return (f[1:half].copy(), fs[1:half].copy())
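# Usage sketch (illustrative, not part of the original module): spectrum of a
# 5 Hz sine sampled at 100 Hz; the variable names below are placeholders.
#
#     fs_hz = 100.0
#     t = np.arange(0, 10, 1 / fs_hz)
#     x = sin(2 * pi * 5 * t)
#     freqs, mags = plotfft(x, fs_hz, doplot=False)
#     peak_hz = freqs[np.argmax(mags)]   # should land close to 5 Hz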
def synthbeats2(duration, meanhr=60, stdhr=1, samplingfreq=250):
    # Minimally based on the parameters from:
    # http://physionet.cps.unizar.es/physiotools/ecgsyn/Matlab/ecgsyn.m
    # Inputs: duration in seconds
    # Returns: signal, peaks
ibi = 60 / float(meanhr) * samplingfreq
sibi = ibi - 60 / (float(meanhr) - stdhr) * samplingfreq
peaks = np.arange(0, duration * samplingfreq, ibi)
peaks[1:] = peaks[1:] + np.random.randn(len(peaks) - 1) * sibi
if peaks[-1] >= duration * samplingfreq:
peaks = peaks[:-1]
peaks = peaks.astype('int')
signal = np.zeros(duration * samplingfreq)
signal[peaks] = 1.0
return signal, peaks
def synthbeats(duration, meanhr=60, stdhr=1, samplingfreq=250, sinfreq=None):
    # Minimally based on the parameters from:
    # http://physionet.cps.unizar.es/physiotools/ecgsyn/Matlab/ecgsyn.m
    # If sinfreq is given, it is used to generate a sinusoidal heart-rate profile instead of random variation
    # Inputs: duration in seconds
    # Returns: t, signal, peaks
t = np.arange(duration * samplingfreq) / float(samplingfreq)
signal = np.zeros(len(t))
print(len(t))
print(len(signal))
    if sinfreq is None:
        # add 20% more beats for some cumulative error
        npeaks = int(1.2 * (duration * meanhr / 60))
        hr = pl.randn(npeaks) * stdhr + meanhr
peaks = pl.cumsum(60. / hr) * samplingfreq
peaks = peaks.astype('int')
peaks = peaks[peaks < t[-1] * samplingfreq]
else:
hr = meanhr + sin(2 * pi * t * sinfreq) * float(stdhr)
index = int(60. / hr[0] * samplingfreq)
peaks = []
while index < len(t):
peaks += [index]
index += int(60. / hr[index] * samplingfreq)
signal[peaks] = 1.0
return t, signal, peaks
def load_with_cache(file_, recache=False, sampling=1,
columns=None, temp_dir='.', data_type='int16'):
"""@brief This function loads a file from the current directory and saves
the cached file to later executions. It's also possible to make a recache
or a subsampling of the signal and choose only a few columns of the signal,
to accelerate the opening process.
@param file String: the name of the file to open.
@param recache Boolean: indication whether it's done recache or not
(default = false).
@param sampling Integer: the sampling step. if 1, the signal isn't
sampled (default = 1).
@param columns Array-Like: the columns to read from the file. if None,
all columns are considered (default = None).
@return data Array-Like: the data from the file.
TODO: Should save cache in a different directory
TODO: Create test function and check size of generated files
TODO: receive a file handle
"""
cfile = '%s.npy' % file_
if (not path.exists(cfile)) or recache:
        if columns is None:
data = np.loadtxt(file_)[::sampling, :]
else:
data = np.loadtxt(file_)[::sampling, columns]
np.save(cfile, data.astype(data_type))
else:
data = np.load(cfile)
return data
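# Usage sketch (illustrative; 'signal.txt' is a hypothetical file): load every
# 4th row of the first two columns and cache the result as 'signal.txt.npy' so
# later calls skip the slow np.loadtxt parse.
#
#     data = load_with_cache('signal.txt', sampling=4, columns=[0, 1])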
def load_data(filename):
"""
:rtype : numpy matrix
"""
data = pandas.read_csv(filename, header=None, delimiter='\t', skiprows=9)
    # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the replacement
    return data.to_numpy()

# Source: novabiosignals/novainstrumentation | novainstrumentation/tools.py | MIT

# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-09-26 01:53
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='post',
name='content',
field=ckeditor.fields.RichTextField(verbose_name='Content'),
),
]

# Source: COAStatistics/aprp | src/apps/posts/migrations/0002_auto_20180926_0953.py | MIT

import os
import copy
import pytest
from jikken.api import Experiment
import git
@pytest.fixture(autouse=True, scope='module')
def experiment_setup(tmpdir_factory):
expected_variables = {
"training_parameters":
{"batch_size": 100,
"algorithm": "Seq2Seq",
"attention": "multiplicative"
},
"input_parameters":
{'batch_size': 4,
"filepath": "/data",
"preprocessing": True,
"transformations": ["stopwords", "tokenize", "remove_punct"]
}
}
tags = ['test', 'simple']
tmpdir = tmpdir_factory.mktemp('mydir')
return expected_variables, tags, tmpdir
def test_experiment_equality(experiment_setup):
# Given some variables and tags
expected_variables, tags, tmpdir = experiment_setup
# When I create an experiment
exp1 = Experiment("exp1", variables=expected_variables, code_dir=str(tmpdir), tags=tags)
    # And another one with the same inputs
exp2 = Experiment("exp1", variables=expected_variables, code_dir=str(tmpdir), tags=tags)
# Then they are equal
assert exp1 == exp2
    # And when I create a third one with an extra tag
tags3 = tags + ["third tag"]
exp3 = Experiment("exp1", variables=expected_variables, code_dir=str(tmpdir), tags=tags3)
# Then that is also equal
assert exp1 == exp3
def test_experiment_not_equality(experiment_setup):
# Given some variables and tags
expected_variables, tags, tmpdir = experiment_setup
# When I create an experiment
exp1 = Experiment("exp1", variables=expected_variables, code_dir=str(tmpdir), tags=tags)
    # And when I create one with a different name
exp5 = Experiment("exp2", variables=expected_variables, code_dir=str(tmpdir), tags=tags)
# Then it is not equal
assert exp1 != exp5
# And when I create one with different variables
new_variables = copy.deepcopy(expected_variables)
new_variables["training_parameters"]["batch_size"] = 5
exp4 = Experiment("exp1", variables=new_variables, code_dir=str(tmpdir), tags=tags)
# Then it is not equal
assert exp1 != exp4
@pytest.fixture(autouse=True)
def jikken_experiment(experiment_setup):
expected_variables, tags, tmpdir = experiment_setup
repo_dir = str(tmpdir)
file_name = os.path.join(repo_dir, 'new-file')
r = git.Repo.init(repo_dir)
# This function just creates an empty file ...
open(file_name, 'wb').close()
r.index.add([file_name])
r.index.commit("initial commit")
exp = Experiment(name="exp", variables=expected_variables, code_dir=repo_dir, tags=tags)
return exp, expected_variables, tags, tmpdir
def test_experiment_variables(jikken_experiment):
"test variables are initialized properly and are not settable"
exp, expected_variables, *_ = jikken_experiment
assert exp.variables == expected_variables
with pytest.raises(AttributeError):
exp.variables = expected_variables
def test_experiment_tags(jikken_experiment):
"test tags are initialized properly and are not settable"
exp, _, expected_tags, _ = jikken_experiment
assert exp.tags == expected_tags
with pytest.raises(AttributeError):
exp.tags = expected_tags
def test_experiment_schema(jikken_experiment):
"test schema is constructed properly"
exp, expected_variables, _, tmpdir = jikken_experiment
expected_hash = '40a3f5106cf9426bd4b13b168717e7bf'
assert exp.schema_hash == expected_hash
exp_2 = Experiment(name="exp1", variables=expected_variables, code_dir=tmpdir.strpath)
assert exp_2.schema_hash == exp.schema_hash
def test_experiment_parameters_schema(jikken_experiment):
"test schema with parameters is constructed properly"
exp, expected_variables, _, tmpdir = jikken_experiment
expected_hash = '77c861c501833128e1cfb5b398588a7e'
assert exp.parameters_hash == expected_hash
def test_experiment_parameters_schema_comparison(jikken_experiment):
exp, expected_variables, _, tmpdir = jikken_experiment
diff_variables = copy.deepcopy(expected_variables)
diff_variables['training_parameters']['batch_size'] = 200
exp_2 = Experiment(name="exp2", variables=diff_variables, code_dir=tmpdir.strpath)
assert exp_2.schema_hash == exp.schema_hash
assert exp.parameters_hash != exp_2.parameters_hash
def test_same_experiment_hash(jikken_experiment):
    exp, expected_variables, _, tmpdir = jikken_experiment
    exp_same = Experiment(name="exp", variables=expected_variables, code_dir=tmpdir.strpath)
    assert exp.hash == exp_same.hash
    assert exp.parameters_hash == exp_same.parameters_hash
    assert exp.schema_hash == exp_same.schema_hash
def test_experiment_different_hash(jikken_experiment):
    exp, expected_variables, _, tmpdir = jikken_experiment
    exp_diff_dir = Experiment(name="exp", variables=expected_variables, code_dir=os.getcwd())
    assert exp.parameters_hash == exp_diff_dir.parameters_hash
    assert exp.schema_hash == exp_diff_dir.schema_hash
    assert exp_diff_dir.hash != exp.hash
    diff_variables = copy.deepcopy(expected_variables)
    diff_variables['training_parameters']['batch_size'] = 200
    exp_diff_variables = Experiment(name="exp1", variables=diff_variables, code_dir=tmpdir.strpath)
    assert exp.parameters_hash != exp_diff_variables.parameters_hash
    assert exp.schema_hash == exp_diff_variables.schema_hash
    assert exp_diff_variables.hash != exp.hash
def test_experiment_different_tags_hash(jikken_experiment):
exp, expected_variables, _, tmpdir = jikken_experiment
exp_diff_tags = Experiment(name="exp", variables=expected_variables, code_dir=tmpdir.strpath, tags='test2')
assert exp.schema_hash == exp_diff_tags.schema_hash
assert exp.parameters_hash == exp_diff_tags.parameters_hash
assert exp.hash == exp_diff_tags.hash
def test_experiment_different_names(jikken_experiment):
exp, expected_variables, _, tmpdir = jikken_experiment
exp_diff_tags = Experiment(name="exp1", variables=expected_variables, code_dir=tmpdir.strpath, tags='test2')
assert exp.schema_hash == exp_diff_tags.schema_hash
assert exp.parameters_hash == exp_diff_tags.parameters_hash
assert exp.hash != exp_diff_tags.hash
def test_experiment_from_dict_is_same(jikken_experiment):
exp, expected_variables, _, tmpdir = jikken_experiment
new_exp = Experiment.from_dict(exp.to_dict())
assert new_exp == exp

# Source: outcastofmusic/jikken | tests/unit/test_experiment.py | MIT

# Copyright © 2020 Hashmap, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from hdm.core.error.hdm_error import HDMError
from hdm.core.orchestrator.orchestrator import Orchestrator
from hdm.core.sink.sink import Sink
from hdm.core.source.source import Source
from hdm.core.utils.parse_config import ParseConfig
from hdm.data_link import DataLink
from hdm.data_link_builder import DataLinkBuilder
class DeclaredOrchestrator(Orchestrator):
"""This is an orchestrator which build DataLinks and will run them as defined - they must be fully defined."""
def run_pipelines(self):
_ = [hdm.run() for hdm in self._data_links]
def _build_data_links(self):
config = ParseConfig.parse(config_path=os.getenv('HDM_MANIFEST'))
state_manager_config = config['state_manager']
manifest_name = os.getenv('HDM_MANIFEST')[os.getenv('HDM_MANIFEST').rindex("/")+1:]
run_id = uuid.uuid4().hex
for link_config in config['declared_data_links']['stages']:
# Create New State Manager
link_state = self._generate_state_manager(state_manager_config=state_manager_config,
data_link_config=link_config,
manifest_name=manifest_name,
run_id=run_id)
# Add the state manager to the sink and source
link_config['source']['conf']['state_manager'] = link_state
link_config['sink']['conf']['state_manager'] = link_state
source = DataLinkBuilder.build_source(link_config['source'])
if not isinstance(source, Source):
error = f'Source {type(source)} is not a Source.'
self._logger.error(error)
raise HDMError(error)
sink = DataLinkBuilder.build_sink(link_config['sink'])
if not isinstance(sink, Sink):
error = f'Sink {type(sink)} is not a Sink.'
self._logger.error(error)
raise HDMError(error)
self._data_links.append(
DataLink(
source=source,
sink=sink
)
)
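# Sketch of the manifest structure _build_data_links expects (inferred from the
# code above, not from separate documentation; only 'state_manager',
# 'declared_data_links', 'stages', 'source', 'sink' and 'conf' come from the code,
# everything else is a placeholder):
#
#     state_manager:              # passed to _generate_state_manager
#       ...
#     declared_data_links:
#       stages:
#         - source:
#             conf: { ... }       # DataLinkBuilder.build_source builds a Source from this
#           sink:
#             conf: { ... }       # DataLinkBuilder.build_sink builds a Sink from this
#
# The manifest path itself is read from the HDM_MANIFEST environment variable.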

# Source: hashmapinc/hdm | hdm/core/orchestrator/declared_orchestrator.py | Apache-2.0

# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest
import os
import sys
import json
import uuid
import tempfile
import time
import logging as logbase
import mock
from oslo_config import cfg
from st2common import log as logging
from st2common.logging.formatters import ConsoleLogFormatter
from st2common.logging.formatters import GelfLogFormatter
from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE
from st2common.models.db.action import ActionDB
from st2common.models.db.rule import RuleDB
from st2common.models.db.execution import ActionExecutionDB
import st2tests.config as tests_config
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
RESOURCES_DIR = os.path.abspath(os.path.join(CURRENT_DIR, "../resources"))
CONFIG_FILE_PATH = os.path.join(RESOURCES_DIR, "logging.conf")
MOCK_MASKED_ATTRIBUTES_BLACKLIST = [
"blacklisted_1",
"blacklisted_2",
"blacklisted_3",
]
class MockRecord(object):
levelno = 40
msg = None
exc_info = None
exc_text = None
stack_info = None
created = time.time()
def getMessage(self):
return self.msg
class LoggerTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
tests_config.parse_args()
def setUp(self):
super(LoggerTestCase, self).setUp()
self.config_text = open(CONFIG_FILE_PATH).read()
self.cfg_fd, self.cfg_path = tempfile.mkstemp()
self.info_log_fd, self.info_log_path = tempfile.mkstemp()
self.audit_log_fd, self.audit_log_path = tempfile.mkstemp()
with open(self.cfg_path, "a") as f:
f.write(self.config_text.format(self.info_log_path, self.audit_log_path))
def tearDown(self):
self._remove_tempfile(self.cfg_fd, self.cfg_path)
self._remove_tempfile(self.info_log_fd, self.info_log_path)
self._remove_tempfile(self.audit_log_fd, self.audit_log_path)
super(LoggerTestCase, self).tearDown()
def _remove_tempfile(self, fd, path):
os.close(fd)
os.unlink(path)
def test_logger_setup_failure(self):
config_file = "/tmp/abc123"
self.assertFalse(os.path.exists(config_file))
self.assertRaises(Exception, logging.setup, config_file)
def test_logger_set_level(self):
logging.setup(self.cfg_path)
log = logging.getLogger(__name__)
self.assertEqual(log.getEffectiveLevel(), logbase.DEBUG)
log.setLevel(logbase.INFO)
self.assertEqual(log.getEffectiveLevel(), logbase.INFO)
log.setLevel(logbase.WARN)
self.assertEqual(log.getEffectiveLevel(), logbase.WARN)
log.setLevel(logbase.ERROR)
self.assertEqual(log.getEffectiveLevel(), logbase.ERROR)
log.setLevel(logbase.CRITICAL)
self.assertEqual(log.getEffectiveLevel(), logbase.CRITICAL)
log.setLevel(logbase.AUDIT)
self.assertEqual(log.getEffectiveLevel(), logbase.AUDIT)
def test_log_info(self):
"""Test that INFO log entry does not go to the audit log."""
logging.setup(self.cfg_path)
log = logging.getLogger(__name__)
msg = uuid.uuid4().hex
log.info(msg)
info_log_entries = open(self.info_log_path).read()
self.assertIn(msg, info_log_entries)
audit_log_entries = open(self.audit_log_path).read()
self.assertNotIn(msg, audit_log_entries)
def test_log_critical(self):
"""Test that CRITICAL log entry does not go to the audit log."""
logging.setup(self.cfg_path)
log = logging.getLogger(__name__)
msg = uuid.uuid4().hex
log.critical(msg)
info_log_entries = open(self.info_log_path).read()
self.assertIn(msg, info_log_entries)
audit_log_entries = open(self.audit_log_path).read()
self.assertNotIn(msg, audit_log_entries)
def test_log_audit(self):
"""Test that AUDIT log entry goes to the audit log."""
logging.setup(self.cfg_path)
log = logging.getLogger(__name__)
msg = uuid.uuid4().hex
log.audit(msg)
info_log_entries = open(self.info_log_path).read()
self.assertIn(msg, info_log_entries)
audit_log_entries = open(self.audit_log_path).read()
self.assertIn(msg, audit_log_entries)
class ConsoleLogFormatterTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
tests_config.parse_args()
def test_format(self):
formatter = ConsoleLogFormatter()
# No extra attributes
mock_message = "test message 1"
record = MockRecord()
record.msg = mock_message
message = formatter.format(record=record)
self.assertEqual(message, mock_message)
# Some extra attributes
mock_message = "test message 2"
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._user_id = 1
record._value = "bar"
record.ignored = "foo" # this one is ignored since it doesnt have a prefix
message = formatter.format(record=record)
expected = "test message 2 (value='bar',user_id=1)"
self.assertEqual(sorted(message), sorted(expected))
@mock.patch(
"st2common.logging.formatters.MASKED_ATTRIBUTES_BLACKLIST",
MOCK_MASKED_ATTRIBUTES_BLACKLIST,
)
def test_format_blacklisted_attributes_are_masked(self):
formatter = ConsoleLogFormatter()
mock_message = "test message 1"
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._blacklisted_1 = "test value 1"
record._blacklisted_2 = "test value 2"
record._blacklisted_3 = {
"key1": "val1",
"blacklisted_1": "val2",
"key3": "val3",
}
record._foo1 = "bar"
message = formatter.format(record=record)
expected = (
"test message 1 (blacklisted_1='********',blacklisted_2='********',"
"blacklisted_3={'key3': 'val3', 'key1': 'val1', 'blacklisted_1': '********'},"
"foo1='bar')"
)
self.assertEqual(sorted(message), sorted(expected))
@mock.patch(
"st2common.logging.formatters.MASKED_ATTRIBUTES_BLACKLIST",
MOCK_MASKED_ATTRIBUTES_BLACKLIST,
)
def test_format_custom_blacklist_attributes_are_masked(self):
cfg.CONF.set_override(
group="log",
name="mask_secrets_blacklist",
override=["blacklisted_4", "blacklisted_5"],
)
formatter = ConsoleLogFormatter()
mock_message = "test message 1"
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._blacklisted_1 = "test value 1"
record._blacklisted_2 = "test value 2"
record._blacklisted_3 = {
"key1": "val1",
"blacklisted_1": "val2",
"key3": "val3",
}
record._blacklisted_4 = "fowa"
record._blacklisted_5 = "fiva"
record._foo1 = "bar"
message = formatter.format(record=record)
expected = (
"test message 1 (foo1='bar',blacklisted_1='********',blacklisted_2='********',"
"blacklisted_3={'key3': 'val3', 'key1': 'val1', 'blacklisted_1': '********'},"
"blacklisted_4='********',blacklisted_5='********')"
)
self.assertEqual(sorted(message), sorted(expected))
@mock.patch(
"st2common.logging.formatters.MASKED_ATTRIBUTES_BLACKLIST",
MOCK_MASKED_ATTRIBUTES_BLACKLIST,
)
def test_format_secret_action_parameters_are_masked(self):
formatter = ConsoleLogFormatter()
mock_message = "test message 1"
parameters = {
"parameter1": {"type": "string", "required": False},
"parameter2": {"type": "string", "required": False, "secret": True},
}
mock_action_db = ActionDB(
pack="testpack", name="test.action", parameters=parameters
)
action = mock_action_db.to_serializable_dict()
parameters = {"parameter1": "value1", "parameter2": "value2"}
mock_action_execution_db = ActionExecutionDB(
action=action, parameters=parameters
)
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._action_execution_db = mock_action_execution_db
expected_msg_part = (
r"'parameters': {u?'parameter1': u?'value1', "
r"u?'parameter2': u?'\*\*\*\*\*\*\*\*'}"
)
message = formatter.format(record=record)
self.assertIn("test message 1", message)
self.assertRegexpMatches(message, expected_msg_part)
@mock.patch(
"st2common.logging.formatters.MASKED_ATTRIBUTES_BLACKLIST",
MOCK_MASKED_ATTRIBUTES_BLACKLIST,
)
def test_format_rule(self):
expected_result = {
"description": "Test description",
"tags": [],
"type": {"ref": "standard", "parameters": {}},
"enabled": True,
"trigger": "test tigger",
"metadata_file": None,
"context": {},
"criteria": {},
"action": {"ref": "1234", "parameters": {"b": 2}},
"uid": "rule:testpack:test.action",
"pack": "testpack",
"ref": "testpack.test.action",
"id": None,
"name": "test.action",
}
mock_rule_db = RuleDB(
pack="testpack",
name="test.action",
description="Test description",
trigger="test tigger",
action={"ref": "1234", "parameters": {"b": 2}},
)
result = mock_rule_db.to_serializable_dict()
self.assertEqual(expected_result, result)
@mock.patch(
"st2common.logging.formatters.MASKED_ATTRIBUTES_BLACKLIST",
MOCK_MASKED_ATTRIBUTES_BLACKLIST,
)
@mock.patch("st2common.models.db.rule.RuleDB._get_referenced_action_model")
def test_format_secret_rule_parameters_are_masked(
self, mock__get_referenced_action_model
):
expected_result = {
"description": "Test description",
"tags": [],
"type": {"ref": "standard", "parameters": {}},
"enabled": True,
"trigger": "test tigger",
"metadata_file": None,
"context": {},
"criteria": {},
"action": {
"ref": "1234",
"parameters": {"parameter1": "value1", "parameter2": "********"},
},
"uid": "rule:testpack:test.action",
"pack": "testpack",
"ref": "testpack.test.action",
"id": None,
"name": "test.action",
}
parameters = {
"parameter1": {"type": "string", "required": False},
"parameter2": {"type": "string", "required": False, "secret": True},
}
mock_action_db = ActionDB(
pack="testpack", name="test.action", parameters=parameters
)
mock__get_referenced_action_model.return_value = mock_action_db
cfg.CONF.set_override(group="log", name="mask_secrets", override=True)
mock_rule_db = RuleDB(
pack="testpack",
name="test.action",
description="Test description",
trigger="test tigger",
action={
"ref": "1234",
"parameters": {"parameter1": "value1", "parameter2": "value2"},
},
)
result = mock_rule_db.to_serializable_dict(True)
self.assertEqual(expected_result, result)
class GelfLogFormatterTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
tests_config.parse_args()
def test_format(self):
formatter = GelfLogFormatter()
expected_keys = [
"version",
"host",
"short_message",
"full_message",
"timestamp",
"timestamp_f",
"level",
]
# No extra attributes
mock_message = "test message 1"
record = MockRecord()
record.msg = mock_message
message = formatter.format(record=record)
parsed = json.loads(message)
for key in expected_keys:
self.assertIn(key, parsed)
self.assertEqual(parsed["short_message"], mock_message)
self.assertEqual(parsed["full_message"], mock_message)
# Some extra attributes
mock_message = "test message 2"
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._user_id = 1
record._value = "bar"
record.ignored = "foo" # this one is ignored since it doesnt have a prefix
record.created = 1234.5678
message = formatter.format(record=record)
parsed = json.loads(message)
for key in expected_keys:
self.assertIn(key, parsed)
self.assertEqual(parsed["short_message"], mock_message)
self.assertEqual(parsed["full_message"], mock_message)
self.assertEqual(parsed["_user_id"], 1)
self.assertEqual(parsed["_value"], "bar")
self.assertEqual(parsed["timestamp"], 1234)
self.assertEqual(parsed["timestamp_f"], 1234.5678)
self.assertNotIn("ignored", parsed)
# Record with an exception
mock_exception = Exception("mock exception bar")
try:
raise mock_exception
except Exception:
mock_exc_info = sys.exc_info()
# Some extra attributes
mock_message = "test message 3"
record = MockRecord()
record.msg = mock_message
record.exc_info = mock_exc_info
message = formatter.format(record=record)
parsed = json.loads(message)
for key in expected_keys:
self.assertIn(key, parsed)
self.assertEqual(parsed["short_message"], mock_message)
self.assertIn(mock_message, parsed["full_message"])
self.assertIn("Traceback", parsed["full_message"])
self.assertIn("_exception", parsed)
self.assertIn("_traceback", parsed)
def test_extra_object_serialization(self):
class MyClass1(object):
def __repr__(self):
return "repr"
class MyClass2(object):
def to_dict(self):
return "to_dict"
class MyClass3(object):
def to_serializable_dict(self, mask_secrets=False):
return "to_serializable_dict"
formatter = GelfLogFormatter()
record = MockRecord()
record.msg = "message"
record._obj1 = MyClass1()
record._obj2 = MyClass2()
record._obj3 = MyClass3()
message = formatter.format(record=record)
parsed = json.loads(message)
self.assertEqual(parsed["_obj1"], "repr")
self.assertEqual(parsed["_obj2"], "to_dict")
self.assertEqual(parsed["_obj3"], "to_serializable_dict")
@mock.patch(
"st2common.logging.formatters.MASKED_ATTRIBUTES_BLACKLIST",
MOCK_MASKED_ATTRIBUTES_BLACKLIST,
)
def test_format_blacklisted_attributes_are_masked(self):
formatter = GelfLogFormatter()
# Some extra attributes
mock_message = "test message 1"
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._blacklisted_1 = "test value 1"
record._blacklisted_2 = "test value 2"
record._blacklisted_3 = {
"key1": "val1",
"blacklisted_1": "val2",
"key3": "val3",
}
record._foo1 = "bar"
message = formatter.format(record=record)
parsed = json.loads(message)
self.assertEqual(parsed["_blacklisted_1"], MASKED_ATTRIBUTE_VALUE)
self.assertEqual(parsed["_blacklisted_2"], MASKED_ATTRIBUTE_VALUE)
self.assertEqual(parsed["_blacklisted_3"]["key1"], "val1")
self.assertEqual(
parsed["_blacklisted_3"]["blacklisted_1"], MASKED_ATTRIBUTE_VALUE
)
self.assertEqual(parsed["_blacklisted_3"]["key3"], "val3")
self.assertEqual(parsed["_foo1"], "bar")
# Assert that the original dict is left unmodified
self.assertEqual(record._blacklisted_1, "test value 1")
self.assertEqual(record._blacklisted_2, "test value 2")
self.assertEqual(record._blacklisted_3["key1"], "val1")
self.assertEqual(record._blacklisted_3["blacklisted_1"], "val2")
self.assertEqual(record._blacklisted_3["key3"], "val3")

# Source: Anshika-Gautam/st2 | st2common/tests/unit/test_logger.py | Apache-2.0

import os
from enum import IntEnum
from typing import Dict, Union, Callable, List, Optional
from cereal import log, car
import cereal.messaging as messaging
from common.conversions import Conversions as CV
from common.realtime import DT_CTRL
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
from selfdrive.version import get_short_branch
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority(IntEnum):
LOWEST = 0
LOWER = 1
LOW = 2
MID = 3
HIGH = 4
HIGHEST = 5
# Event types
class ET:
ENABLE = 'enable'
PRE_ENABLE = 'preEnable'
OVERRIDE = 'override'
NO_ENTRY = 'noEntry'
WARNING = 'warning'
USER_DISABLE = 'userDisable'
SOFT_DISABLE = 'softDisable'
IMMEDIATE_DISABLE = 'immediateDisable'
PERMANENT = 'permanent'
# get event name from enum
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
def __init__(self):
self.events: List[int] = []
self.static_events: List[int] = []
self.events_prev = dict.fromkeys(EVENTS.keys(), 0)
@property
def names(self) -> List[int]:
return self.events
def __len__(self) -> int:
return len(self.events)
def add(self, event_name: int, static: bool=False) -> None:
if static:
self.static_events.append(event_name)
self.events.append(event_name)
def clear(self) -> None:
self.events_prev = {k: (v + 1 if k in self.events else 0) for k, v in self.events_prev.items()}
self.events = self.static_events.copy()
def any(self, event_type: str) -> bool:
return any(event_type in EVENTS.get(e, {}) for e in self.events)
def create_alerts(self, event_types: List[str], callback_args=None):
if callback_args is None:
callback_args = []
ret = []
for e in self.events:
types = EVENTS[e].keys()
for et in event_types:
if et in types:
alert = EVENTS[e][et]
if not isinstance(alert, Alert):
alert = alert(*callback_args)
if DT_CTRL * (self.events_prev[e] + 1) >= alert.creation_delay:
alert.alert_type = f"{EVENT_NAME[e]}/{et}"
alert.event_type = et
ret.append(alert)
return ret
def add_from_msg(self, events):
for e in events:
self.events.append(e.name.raw)
def to_msg(self):
ret = []
for event_name in self.events:
event = car.CarEvent.new_message()
event.name = event_name
for event_type in EVENTS.get(event_name, {}):
setattr(event, event_type, True)
ret.append(event)
return ret
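# Usage sketch (illustrative only, not part of the module): CP, sm, is_metric and
# soft_disable_timer below are placeholders for whatever state the caller tracks;
# they are forwarded to the alert callback functions defined further down.
#
#     events = Events()
#     events.add(EventName.startup, static=True)   # survives clear()
#     events.add(EventName.doorOpen)               # per-frame event
#     alerts = events.create_alerts([ET.PERMANENT, ET.NO_ENTRY],
#                                   callback_args=[CP, sm, is_metric, soft_disable_timer])
#     events.clear()                               # resets back to the static events only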
class Alert:
def __init__(self,
alert_text_1: str,
alert_text_2: str,
alert_status: log.ControlsState.AlertStatus,
alert_size: log.ControlsState.AlertSize,
priority: Priority,
visual_alert: car.CarControl.HUDControl.VisualAlert,
audible_alert: car.CarControl.HUDControl.AudibleAlert,
duration: float,
alert_rate: float = 0.,
creation_delay: float = 0.):
self.alert_text_1 = alert_text_1
self.alert_text_2 = alert_text_2
self.alert_status = alert_status
self.alert_size = alert_size
self.priority = priority
self.visual_alert = visual_alert
self.audible_alert = audible_alert
self.duration = int(duration / DT_CTRL)
self.alert_rate = alert_rate
self.creation_delay = creation_delay
self.alert_type = ""
self.event_type: Optional[str] = None
def __str__(self) -> str:
return f"{self.alert_text_1}/{self.alert_text_2} {self.priority} {self.visual_alert} {self.audible_alert}"
def __gt__(self, alert2) -> bool:
return self.priority > alert2.priority
class NoEntryAlert(Alert):
def __init__(self, alert_text_2: str, visual_alert: car.CarControl.HUDControl.VisualAlert=VisualAlert.none):
super().__init__("openpilot Unavailable", alert_text_2, AlertStatus.normal,
AlertSize.mid, Priority.LOW, visual_alert,
AudibleAlert.refuse, 3.)
class SoftDisableAlert(Alert):
def __init__(self, alert_text_2: str):
super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
AlertStatus.userPrompt, AlertSize.full,
Priority.MID, VisualAlert.steerRequired,
AudibleAlert.warningSoft, 2.),
# less harsh version of SoftDisable, where the condition is user-triggered
class UserSoftDisableAlert(SoftDisableAlert):
def __init__(self, alert_text_2: str):
super().__init__(alert_text_2),
self.alert_text_1 = "openpilot will disengage"
class ImmediateDisableAlert(Alert):
def __init__(self, alert_text_2: str):
super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.steerRequired,
AudibleAlert.warningImmediate, 4.),
class EngagementAlert(Alert):
def __init__(self, audible_alert: car.CarControl.HUDControl.AudibleAlert):
super().__init__("", "",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none,
audible_alert, .2),
class NormalPermanentAlert(Alert):
def __init__(self, alert_text_1: str, alert_text_2: str = "", duration: float = 0.2, priority: Priority = Priority.LOWER, creation_delay: float = 0.):
super().__init__(alert_text_1, alert_text_2,
AlertStatus.normal, AlertSize.mid if len(alert_text_2) else AlertSize.small,
priority, VisualAlert.none, AudibleAlert.none, duration, creation_delay=creation_delay),
class StartupAlert(Alert):
def __init__(self, alert_text_1: str, alert_text_2: str = "Always keep hands on wheel and eyes on road", alert_status=AlertStatus.normal):
super().__init__(alert_text_1, alert_text_2,
alert_status, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 10.),
# ********** helper functions **********
def get_display_speed(speed_ms: float, metric: bool) -> str:
speed = int(round(speed_ms * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH)))
unit = 'km/h' if metric else 'mph'
return f"{speed} {unit}"
# ********** alert callback functions **********
AlertCallbackType = Callable[[car.CarParams, messaging.SubMaster, bool, int], Alert]
def soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
def func(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
if soft_disable_time < int(0.5 / DT_CTRL):
return ImmediateDisableAlert(alert_text_2)
return SoftDisableAlert(alert_text_2)
return func
def user_soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
def func(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
if soft_disable_time < int(0.5 / DT_CTRL):
return ImmediateDisableAlert(alert_text_2)
return UserSoftDisableAlert(alert_text_2)
return func
def startup_master_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
branch = get_short_branch("")
if "REPLAY" in os.environ:
branch = "replay"
return StartupAlert("WARNING: This branch is not tested", branch, alert_status=AlertStatus.userPrompt)
def below_engage_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return NoEntryAlert(f"Speed Below {get_display_speed(CP.minEnableSpeed, metric)}")
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return Alert(
f"Steer Unavailable Below {get_display_speed(CP.minSteerSpeed, metric)}",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.prompt, 0.4)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return Alert(
"Calibration in Progress: %d%%" % sm['liveCalibration'].calPerc,
f"Drive Above {get_display_speed(MIN_SPEED_FILTER, metric)}",
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
gps_integrated = sm['peripheralState'].pandaType in (log.PandaState.PandaType.uno, log.PandaState.PandaType.dos)
return Alert(
"Poor GPS reception",
"Hardware malfunctioning if sky is visible" if gps_integrated else "Check GPS antenna placement",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
text = "Cruise Mode Disabled"
if CP.carName == "honda":
text = "Main Switch Off"
return NoEntryAlert(text)
def joystick_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
axes = sm['testJoystick'].axes
gb, steer = list(axes)[:2] if len(axes) else (0., 0.)
vals = f"Gas: {round(gb * 100.)}%, Steer: {round(steer * 100.)}%"
return NormalPermanentAlert("Joystick Mode", vals)
EVENTS: Dict[int, Dict[str, Union[Alert, AlertCallbackType]]] = {
# ********** events with no alerts **********
EventName.stockFcw: {},
# ********** events only containing alerts displayed in all states **********
EventName.joystickDebug: {
ET.WARNING: joystick_alert,
ET.PERMANENT: NormalPermanentAlert("Joystick Mode"),
},
EventName.controlsInitializing: {
ET.NO_ENTRY: NoEntryAlert("System Initializing"),
},
EventName.startup: {
ET.PERMANENT: StartupAlert("Be ready to take over at any time")
},
EventName.startupMaster: {
ET.PERMANENT: startup_master_alert,
},
# Car is recognized, but marked as dashcam only
EventName.startupNoControl: {
ET.PERMANENT: StartupAlert("Dashcam mode"),
},
# Car is not recognized
EventName.startupNoCar: {
ET.PERMANENT: StartupAlert("Dashcam mode for unsupported car"),
},
EventName.startupNoFw: {
ET.PERMANENT: StartupAlert("Car Unrecognized",
"Check comma power connections",
alert_status=AlertStatus.userPrompt),
},
EventName.dashcamMode: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
priority=Priority.LOWEST),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: NormalPermanentAlert("Stock LKAS is on",
"Turn off stock LKAS to engage"),
},
EventName.cruiseMismatch: {
#ET.PERMANENT: ImmediateDisableAlert("openpilot failed to cancel cruise"),
},
# openpilot doesn't recognize the car. This switches openpilot into a
# read-only mode. This can be solved by adding your fingerprint.
# See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information
EventName.carUnrecognized: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
"Car Unrecognized",
priority=Priority.LOWEST),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"BRAKE!",
"Stock AEB: Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 2.),
ET.NO_ENTRY: NoEntryAlert("Stock AEB: Risk of Collision"),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"BRAKE!",
"Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.warningSoft, 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"Lane Departure Detected",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.ldw, AudibleAlert.prompt, 3.),
},
# ********** events only containing alerts that display while engaged **********
# openpilot tries to learn certain parameters about your car by observing
# how the car behaves to steering inputs from both human and openpilot driving.
# This includes:
# - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle
# - tire stiffness: how much grip your tires have
# - angle offset: most steering angle sensors are offset and measure a non zero angle when driving straight
# This alert is thrown when any of these values exceed a sanity check. This can be caused by
# bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub
EventName.vehicleModelInvalid: {
ET.NO_ENTRY: NoEntryAlert("Vehicle Parameter Identification Failed"),
ET.SOFT_DISABLE: soft_disable_alert("Vehicle Parameter Identification Failed"),
},
EventName.steerTempUnavailableSilent: {
ET.WARNING: Alert(
"Steering Temporarily Unavailable",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.prompt, 1.),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"Pay Attention",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"Pay Attention",
"Driver Distracted",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Distracted",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"Touch Steering Wheel: No Face Detected",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"Touch Steering Wheel",
"Driver Unresponsive",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Unresponsive",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Resume Driving Manually",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"STOPPED",
"Press Resume to Go",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"Steer Left to Start Lane Change Once Safe",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"Steer Right to Start Lane Change Once Safe",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"Car Detected in Blindspot",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.prompt, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"Changing Lanes",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"Take Control",
"Turn Exceeds Steering Limit",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 1.),
},
# Thrown when the fan is driven at >50% but is not rotating
EventName.fanMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Fan Malfunction", "Likely Hardware Issue"),
},
# Camera is not outputting frames at a constant framerate
EventName.cameraMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Camera Malfunction", "Likely Hardware Issue"),
},
# Unused
EventName.gpsMalfunction: {
ET.PERMANENT: NormalPermanentAlert("GPS Malfunction", "Likely Hardware Issue"),
},
# When the GPS position and localizer diverge the localizer is reset to the
# current GPS position. This alert is thrown when the localizer is reset
# more often than expected.
EventName.localizerMalfunction: {
# ET.PERMANENT: NormalPermanentAlert("Sensor Malfunction", "Hardware Malfunction"),
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Parking Brake Engaged"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed",
visual_alert=VisualAlert.brakePressed),
},
EventName.pedalPressedPreEnable: {
ET.PRE_ENABLE: Alert(
"Release Pedal to Engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1, creation_delay=1.),
},
EventName.gasPressedOverride: {
ET.OVERRIDE: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Adaptive Cruise Disabled"),
},
EventName.steerTempUnavailable: {
ET.SOFT_DISABLE: soft_disable_alert("Steering Temporarily Unavailable"),
ET.NO_ENTRY: NoEntryAlert("Steering Temporarily Unavailable"),
},
EventName.outOfSpace: {
ET.PERMANENT: NormalPermanentAlert("Out of Storage"),
ET.NO_ENTRY: NoEntryAlert("Out of Storage"),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: below_engage_speed_alert,
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"No Data from Device Sensors",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: NormalPermanentAlert("Speaker not found", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
},
EventName.overheat: {
ET.PERMANENT: NormalPermanentAlert("System Overheated"),
ET.SOFT_DISABLE: soft_disable_alert("System Overheated"),
ET.NO_ENTRY: NoEntryAlert("System Overheated"),
},
EventName.wrongGear: {
ET.SOFT_DISABLE: user_soft_disable_alert("Gear not D"),
ET.NO_ENTRY: NoEntryAlert("Gear not D"),
},
# This alert is thrown when the calibration angles are outside of the acceptable range.
# For example if the device is pointed too much to the left or the right.
# Usually this can only be solved by removing the mount from the windshield completely,
# and attaching while making sure the device is pointed straight forward and is level.
# See https://comma.ai/setup for more information
EventName.calibrationInvalid: {
ET.PERMANENT: NormalPermanentAlert("Calibration Invalid", "Remount Device and Recalibrate"),
ET.SOFT_DISABLE: soft_disable_alert("Calibration Invalid: Remount Device & Recalibrate"),
ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Remount Device & Recalibrate"),
},
EventName.calibrationIncomplete: {
ET.PERMANENT: calibration_incomplete_alert,
ET.SOFT_DISABLE: soft_disable_alert("Calibration in Progress"),
ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: user_soft_disable_alert("Door Open"),
ET.NO_ENTRY: NoEntryAlert("Door Open"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: user_soft_disable_alert("Seatbelt Unlatched"),
ET.NO_ENTRY: NoEntryAlert("Seatbelt Unlatched"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: soft_disable_alert("ESP Off"),
ET.NO_ENTRY: NoEntryAlert("ESP Off"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: soft_disable_alert("Low Battery"),
ET.NO_ENTRY: NoEntryAlert("Low Battery"),
},
# Different openpilot services communicate between each other at a certain
# interval. If communication does not follow the regular schedule this alert
# is thrown. This can mean a service crashed, did not broadcast a message for
# ten times the regular interval, or the average interval is more than 10% too high.
EventName.commIssue: {
ET.SOFT_DISABLE: soft_disable_alert("Communication Issue between Processes"),
ET.NO_ENTRY: NoEntryAlert("Communication Issue between Processes"),
},
# Thrown when manager detects a service exited unexpectedly while driving
EventName.processNotRunning: {
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.radarFault: {
ET.SOFT_DISABLE: soft_disable_alert("Radar Error: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
},
# Every frame from the camera should be processed by the model. If modeld
# is not processing frames fast enough they have to be dropped. This alert is
# thrown when over 20% of frames are dropped.
EventName.modeldLagging: {
ET.SOFT_DISABLE: soft_disable_alert("Driving model lagging"),
ET.NO_ENTRY: NoEntryAlert("Driving model lagging"),
},
# Besides predicting the path, lane lines and lead car data the model also
# predicts the current velocity and rotation speed of the car. If the model is
# very uncertain about the current velocity while the car is moving, this
# usually means the model has trouble understanding the scene. This is used
# as a heuristic to warn the driver.
EventName.posenetInvalid: {
ET.SOFT_DISABLE: soft_disable_alert("Model Output Uncertain"),
ET.NO_ENTRY: NoEntryAlert("Model Output Uncertain"),
},
# When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we
# alert the driver the device might have fallen from the windshield.
EventName.deviceFalling: {
ET.SOFT_DISABLE: soft_disable_alert("Device Fell Off Mount"),
ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: soft_disable_alert("Low Memory: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("Low Memory", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Low Memory: Reboot Your Device"),
},
EventName.highCpuUsage: {
#ET.SOFT_DISABLE: soft_disable_alert("System Malfunction: Reboot Your Device"),
#ET.PERMANENT: NormalPermanentAlert("System Malfunction", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.accFaulted: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Faulted"),
ET.PERMANENT: NormalPermanentAlert("Cruise Faulted", ""),
ET.NO_ENTRY: NoEntryAlert("Cruise Faulted"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
},
EventName.roadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.driverCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.wideRoadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
# Sometimes the USB stack on the device can get into a bad state
# causing the connection to the panda to be lost
EventName.usbError: {
ET.SOFT_DISABLE: soft_disable_alert("USB Error: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("USB Error: Reboot Your Device", ""),
ET.NO_ENTRY: NoEntryAlert("USB Error: Reboot Your Device"),
},
# This alert can be thrown for the following reasons:
# - No CAN data received at all
# - CAN data is received, but some message are not received at the right frequency
# If you're not writing a new car port, this is usually cause by faulty wiring
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error: Check Connections"),
ET.PERMANENT: Alert(
"CAN Error: Check Connections",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1., creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("LKAS Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"Reverse\nGear",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2, creation_delay=0.5),
ET.USER_DISABLE: ImmediateDisableAlert("Reverse Gear"),
ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
},
# On cars that use stock ACC the car can decide to cancel ACC for various reasons.
  # When this happens we can no longer control the car so the user needs to be warned immediately.
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
},
# For planning the trajectory Model Predictive Control (MPC) is used. This is
# an optimization algorithm that is not guaranteed to find a feasible solution.
# If no solution is found or the solution has a very high cost this alert is thrown.
EventName.plannerError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Planner Solution Error"),
ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
},
# When the relay in the harness box opens the CAN bus between the LKAS camera
# and the rest of the car is separated. When messages from the LKAS camera
# are received on the car side this usually means the relay hasn't opened correctly
# and this alert is thrown.
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Malfunction"),
ET.PERMANENT: NormalPermanentAlert("Harness Malfunction", "Check Hardware"),
ET.NO_ENTRY: NoEntryAlert("Harness Malfunction"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
ET.NO_ENTRY: NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
},
# When the car is driving faster than most cars in the training data, the model outputs can be unpredictable.
EventName.speedTooHigh: {
ET.WARNING: Alert(
"Speed Too High",
"Model uncertain at this speed",
AlertStatus.userPrompt, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 4.),
ET.NO_ENTRY: NoEntryAlert("Slow down to engage"),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.lkasDisabled: {
ET.PERMANENT: NormalPermanentAlert("LKAS Disabled: Enable LKAS to engage"),
ET.NO_ENTRY: NoEntryAlert("LKAS Disabled"),
},
}

# Source: GregorKikelj/openpilot | selfdrive/controls/lib/events.py | MIT

from rest_framework import views
from rest_framework.response import Response
from sponsors.models import Sponsor, OpenRole
class SponsorAPIView(views.APIView):
def get(self, request):
sponsor_data = Sponsor.objects.order_by('level')
level_dict = {}
for sponsor in sponsor_data:
if sponsor.level_en_name not in level_dict:
level_dict[sponsor.level_en_name] = []
level_dict[sponsor.level_en_name].append({
"name": sponsor.name,
"subtitle_en_us": sponsor.subtitle_en_us,
"subtitle_zh_hant": sponsor.subtitle_zh_hant,
"intro_en_us": sponsor.intro_en_us,
"intro_zh_hant": sponsor.intro_zh_hant,
"website_url": sponsor.website_url,
"logo_url": sponsor.logo.url if sponsor.logo else ''
})
response_data = {"data": []}
for level_name, sponsors in level_dict.items():
response_data["data"].append({
"level_name": level_name,
"sponsors": sponsors
})
return Response(response_data)
class JobAPIView(views.APIView):
def get(self, request):
sponsor_has_open_role = set(OpenRole.objects.values_list('sponsor', flat=True))
sponsor_set = Sponsor.objects.filter(id__in=sponsor_has_open_role).order_by('level')
open_roles = OpenRole.objects.filter(sponsor__in=sponsor_has_open_role).order_by('sponsor__level')
response_data = {"data": []}
for sponsor in sponsor_set:
jobs = []
            for open_role in open_roles:
                # only include roles that belong to this sponsor
                if open_role.sponsor_id != sponsor.id:
                    continue
jobs.append({
"job_url": open_role.url,
"job_name": open_role.name,
"job_description_en_us": open_role.description_en_us,
"job_description_zh_hant": open_role.description_zh_hant,
})
response_data["data"].append({
"sponsor_logo_url": sponsor.logo.url if sponsor.logo else '',
"sponsor_name": sponsor.name,
"jobs": jobs
})
return Response(response_data)

# Source: DoubleTakoMeat/pycon.tw | src/sponsors/api/views.py | MIT

import copy
import rdtest
import renderdoc as rd
class D3D11_Vertex_Attr_Zoo(rdtest.TestCase):
demos_test_name = 'D3D11_Vertex_Attr_Zoo'
def check_capture(self):
draw = self.find_draw("Draw")
self.check(draw is not None)
self.controller.SetFrameEvent(draw.eventId, False)
# Make an output so we can pick pixels
out: rd.ReplayOutput = self.controller.CreateOutput(rd.CreateHeadlessWindowingData(100, 100), rd.ReplayOutputType.Texture)
self.check(out is not None)
ref = {
0: {
'SNORM': [1.0, -1.0, 1.0, -1.0],
'UNORM': [12345.0/65535.0, 6789.0/65535.0, 1234.0/65535.0, 567.0/65535.0],
'UINT': [12345, 6789, 1234, 567],
'ARRAY0': [1.0, 2.0],
'ARRAY1': [3.0, 4.0],
'ARRAY2': [5.0, 6.0],
'MATRIX0': [7.0, 8.0],
'MATRIX1': [9.0, 10.0],
},
1: {
'SNORM': [32766.0/32767.0, -32766.0/32767.0, 16000.0/32767.0, -16000.0/32767.0],
'UNORM': [56.0/65535.0, 7890.0/65535.0, 123.0/65535.0, 4567.0/65535.0],
'UINT': [56, 7890, 123, 4567],
'ARRAY0': [11.0, 12.0],
'ARRAY1': [13.0, 14.0],
'ARRAY2': [15.0, 16.0],
'MATRIX0': [17.0, 18.0],
'MATRIX1': [19.0, 20.0],
},
2: {
'SNORM': [5.0/32767.0, -5.0/32767.0, 0.0, 0.0],
'UNORM': [8765.0/65535.0, 43210.0/65535.0, 987.0/65535.0, 65432.0/65535.0],
'UINT': [8765, 43210, 987, 65432],
'ARRAY0': [21.0, 22.0],
'ARRAY1': [23.0, 24.0],
'ARRAY2': [25.0, 26.0],
'MATRIX0': [27.0, 28.0],
'MATRIX1': [29.0, 30.0],
},
}
in_ref = copy.deepcopy(ref)
vsout_ref = copy.deepcopy(ref)
gsout_ref = ref
vsout_ref[0]['SV_Position'] = [-0.5, 0.5, 0.0, 1.0]
gsout_ref[0]['SV_Position'] = [0.5, -0.5, 0.4, 1.2]
vsout_ref[1]['SV_Position'] = [0.0, -0.5, 0.0, 1.0]
gsout_ref[1]['SV_Position'] = [-0.5, 0.0, 0.4, 1.2]
vsout_ref[2]['SV_Position'] = [0.5, 0.5, 0.0, 1.0]
gsout_ref[2]['SV_Position'] = [0.5, 0.5, 0.4, 1.2]
self.check_mesh_data(in_ref, self.get_vsin(draw))
rdtest.log.success("Vertex input data is as expected")
self.check_mesh_data(vsout_ref, self.get_postvs(rd.MeshDataStage.VSOut))
rdtest.log.success("Vertex output data is as expected")
self.check_mesh_data(gsout_ref, self.get_postvs(rd.MeshDataStage.GSOut))
rdtest.log.success("Geometry output data is as expected")
pipe: rd.PipeState = self.controller.GetPipelineState()
tex = rd.TextureDisplay()
tex.resourceId = pipe.GetOutputTargets()[0].resourceId
out.SetTextureDisplay(tex)
texdetails = self.get_texture(tex.resourceId)
picked: rd.PixelValue = out.PickPixel(tex.resourceId, False,
int(texdetails.width / 2), int(texdetails.height / 2), 0, 0, 0)
if not rdtest.value_compare(picked.floatValue, [0.0, 1.0, 0.0, 1.0]):
raise rdtest.TestFailureException("Picked value {} doesn't match expectation".format(picked.floatValue))
rdtest.log.success("Triangle picked value is as expected")
out.Shutdown()
| 36.568421 | 130 | 0.527634 | [
"MIT"
] | DmitrySoshnikov/renderdoc | util/test/tests/D3D11/D3D11_Vertex_Attr_Zoo.py | 3,474 | Python |
"""
Django settings for orders project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mrmul5#_g^qb*mue%y$oz@gs$1t#sz_4oj#ea#hv!-s!xu$y4h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'app.apps.AppConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'orders.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'orders.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DATETIME_FORMAT': '%Y-%m-%d %H:%M:%S'
}
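# With this setting DRF renders DateTimeField values as, e.g.,
# "2019-12-31 23:59:59" instead of the default ISO 8601 string.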
| 24.638462 | 91 | 0.690603 | [
"MIT"
] | rafaeltardivo/micro-order | services/orders/orders/settings.py | 3,203 | Python |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Tests for customremotes archives providing dl+archive URLs handling"""
import glob
import logging
import os
import os.path as op
import sys
from time import sleep
from unittest.mock import patch
from datalad.api import Dataset
from datalad.cmd import (
GitWitlessRunner,
KillOutput,
StdOutErrCapture,
WitlessRunner,
)
from datalad.support.exceptions import CommandError
from ...consts import ARCHIVES_SPECIAL_REMOTE
from ...support.annexrepo import AnnexRepo
from ...tests.test_archives import (
fn_archive_obscure,
fn_archive_obscure_ext,
fn_in_archive_obscure,
)
from ...tests.utils_pytest import (
abspath,
assert_equal,
assert_false,
assert_not_equal,
assert_not_in,
assert_raises,
assert_true,
chpwd,
eq_,
in_,
known_failure_githubci_win,
ok_,
serve_path_via_http,
swallow_logs,
with_tempfile,
with_tree,
)
from ...utils import unlink
from ..archives import (
ArchiveAnnexCustomRemote,
link_file_load,
)
# TODO: with_tree ATM for archives creates this nested top directory
# matching archive name, so it will be a/d/test.dat ... we don't want that probably
@known_failure_githubci_win
@with_tree(
tree=(('a.tar.gz', {'d': {fn_in_archive_obscure: '123'}}),
('simple.txt', '123'),
(fn_archive_obscure_ext, (('d', ((fn_in_archive_obscure, '123'),)),)),
(fn_archive_obscure, '123')))
@with_tempfile()
def test_basic_scenario(d=None, d2=None):
fn_archive, fn_extracted = fn_archive_obscure_ext, fn_archive_obscure
annex = AnnexRepo(d, backend='MD5E')
annex.init_remote(
ARCHIVES_SPECIAL_REMOTE,
['encryption=none', 'type=external', 'externaltype=%s' % ARCHIVES_SPECIAL_REMOTE,
'autoenable=true'
])
assert annex.is_special_annex_remote(ARCHIVES_SPECIAL_REMOTE)
# We want two maximally obscure names, which are also different
assert(fn_extracted != fn_in_archive_obscure)
annex.add(fn_archive)
annex.commit(msg="Added tarball")
annex.add(fn_extracted)
annex.commit(msg="Added the load file")
# Operations with archive remote URL
# this is not using this class for its actual purpose
# being a special remote implementation
# likely all this functionality should be elsewhere
annexcr = ArchiveAnnexCustomRemote(annex=None, path=d)
# few quick tests for get_file_url
eq_(annexcr.get_file_url(archive_key="xyz", file="a.dat"), "dl+archive:xyz#path=a.dat")
eq_(annexcr.get_file_url(archive_key="xyz", file="a.dat", size=999), "dl+archive:xyz#path=a.dat&size=999")
# see https://github.com/datalad/datalad/issues/441#issuecomment-223376906
# old style
eq_(annexcr._parse_url("dl+archive:xyz/a.dat#size=999"), ("xyz", "a.dat", {'size': 999}))
eq_(annexcr._parse_url("dl+archive:xyz/a.dat"), ("xyz", "a.dat", {})) # old format without size
# new style
eq_(annexcr._parse_url("dl+archive:xyz#path=a.dat&size=999"), ("xyz", "a.dat", {'size': 999}))
eq_(annexcr._parse_url("dl+archive:xyz#path=a.dat"), ("xyz", "a.dat", {})) # old format without size
file_url = annexcr.get_file_url(
archive_file=fn_archive,
file=fn_archive.replace('.tar.gz', '') + '/d/' + fn_in_archive_obscure)
annex.add_url_to_file(fn_extracted, file_url, ['--relaxed'])
annex.drop(fn_extracted)
list_of_remotes = annex.whereis(fn_extracted, output='descriptions')
in_('[%s]' % ARCHIVES_SPECIAL_REMOTE, list_of_remotes)
assert_false(annex.file_has_content(fn_extracted))
with swallow_logs(new_level=logging.INFO) as cml:
annex.get(fn_extracted)
# Hint users to the extraction cache (and to datalad clean)
cml.assert_logged(msg="datalad-archives special remote is using an "
"extraction", level="INFO", regex=False)
assert_true(annex.file_has_content(fn_extracted))
annex.rm_url(fn_extracted, file_url)
assert_raises(CommandError, annex.drop, fn_extracted)
annex.add_url_to_file(fn_extracted, file_url)
annex.drop(fn_extracted)
annex.get(fn_extracted)
annex.drop(fn_extracted) # so we don't get from this one next
# Let's create a clone and verify chain of getting file through the tarball
cloned_annex = AnnexRepo.clone(d, d2)
# we still need to enable manually atm that special remote for archives
# cloned_annex.enable_remote('annexed-archives')
assert_false(cloned_annex.file_has_content(fn_archive))
assert_false(cloned_annex.file_has_content(fn_extracted))
cloned_annex.get(fn_extracted)
assert_true(cloned_annex.file_has_content(fn_extracted))
# as a result it would also fetch tarball
assert_true(cloned_annex.file_has_content(fn_archive))
# verify that we can drop if original archive gets dropped but available online:
# -- done as part of the test_add_archive_content.py
# verify that we can't drop a file if archive key was dropped and online archive was removed or changed size! ;)
@known_failure_githubci_win
@with_tree(
tree={'a.tar.gz': {'d': {fn_in_archive_obscure: '123'}}}
)
def test_annex_get_from_subdir(topdir=None):
ds = Dataset(topdir)
ds.create(force=True)
ds.save('a.tar.gz')
ds.add_archive_content('a.tar.gz', delete=True)
fpath = op.join(topdir, 'a', 'd', fn_in_archive_obscure)
with chpwd(op.join(topdir, 'a', 'd')):
runner = WitlessRunner()
runner.run(
['git', 'annex', 'drop', '--', fn_in_archive_obscure],
protocol=KillOutput) # run git annex drop
assert_false(ds.repo.file_has_content(fpath)) # and verify if file deleted from directory
runner.run(
['git', 'annex', 'get', '--', fn_in_archive_obscure],
protocol=KillOutput) # run git annex get
assert_true(ds.repo.file_has_content(fpath)) # and verify if file got into directory
def test_get_git_environ_adjusted():
gitrunner = GitWitlessRunner()
env = {"GIT_DIR": "../../.git", "GIT_WORK_TREE": "../../", "TEST_VAR": "Exists"}
# test conversion of relevant env vars from relative_path to correct absolute_path
adj_env = gitrunner.get_git_environ_adjusted(env)
assert_equal(adj_env["GIT_DIR"], abspath(env["GIT_DIR"]))
assert_equal(adj_env["GIT_WORK_TREE"], abspath(env["GIT_WORK_TREE"]))
# test if other environment variables passed to function returned unaltered
assert_equal(adj_env["TEST_VAR"], env["TEST_VAR"])
# test import of sys_env if no environment passed to function
with patch.dict('os.environ', {'BOGUS': '123'}):
sys_env = gitrunner.get_git_environ_adjusted()
assert_equal(sys_env["BOGUS"], "123")
def test_no_rdflib_loaded():
# rely on rdflib polluting stdout to see that it is not loaded whenever we load this remote
# since that adds 300ms delay for no immediate use
runner = WitlessRunner()
out = runner.run(
[sys.executable,
'-c',
'import datalad.customremotes.archives, sys; '
'print([k for k in sys.modules if k.startswith("rdflib")])'],
protocol=StdOutErrCapture)
# print cmo.out
assert_not_in("rdflib", out['stdout'])
assert_not_in("rdflib", out['stderr'])
@with_tree(tree=
{'1.tar.gz':
{
'bu.dat': '52055957098986598349795121365535' * 10000,
'bu3.dat': '8236397048205454767887168342849275422' * 10000
},
'2.tar.gz':
{
'bu2.dat': '17470674346319559612580175475351973007892815102' * 10000
},
}
)
@serve_path_via_http()
@with_tempfile
def check_observe_tqdm(topdir=None, topurl=None, outdir=None):
# just a helper to enable/use when want quickly to get some
# repository with archives and observe tqdm
from datalad.api import (
add_archive_content,
create,
)
ds = create(outdir)
for f in '1.tar.gz', '2.tar.gz':
with chpwd(outdir):
ds.repo.add_url_to_file(f, topurl + f)
ds.save(f)
add_archive_content(f, delete=True, drop_after=True)
files = glob.glob(op.join(outdir, '*'))
ds.drop(files) # will not drop tarballs
ds.repo.drop([], options=['--all', '--fast'])
ds.get(files)
ds.repo.drop([], options=['--all', '--fast'])
# now loop so we could play with it outside
print(outdir)
# import pdb; pdb.set_trace()
while True:
sleep(0.1)
@known_failure_githubci_win
@with_tempfile
def test_link_file_load(tempfile=None):
tempfile2 = tempfile + '_'
with open(tempfile, 'w') as f:
f.write("LOAD")
link_file_load(tempfile, tempfile2) # this should work in general
ok_(os.path.exists(tempfile2))
with open(tempfile2, 'r') as f:
assert_equal(f.read(), "LOAD")
def inode(fname):
with open(fname) as fd:
return os.fstat(fd.fileno()).st_ino
def stats(fname, times=True):
"""Return stats on the file which should have been preserved"""
with open(fname) as fd:
st = os.fstat(fd.fileno())
stats = (st.st_mode, st.st_uid, st.st_gid, st.st_size)
if times:
return stats + (st.st_atime, st.st_mtime)
else:
return stats
# despite copystat mtime is not copied. TODO
# st.st_mtime)
# TODO: fix up the test to not rely on OS assumptions but rather
# first sense filesystem about linking support.
# For Yarik's Windows 10 VM test was failing under assumption that
# linking is not supported at all, but I guess it does.
if True: # on_linux or on_osx:
# above call should result in the hardlink
assert_equal(inode(tempfile), inode(tempfile2))
assert_equal(stats(tempfile), stats(tempfile2))
# and if we mock absence of .link
def raise_AttributeError(*args):
raise AttributeError("TEST")
with patch('os.link', raise_AttributeError):
with swallow_logs(logging.WARNING) as cm:
link_file_load(tempfile, tempfile2) # should still work
ok_("failed (TEST), copying file" in cm.out)
# should be a copy (after mocked call)
assert_not_equal(inode(tempfile), inode(tempfile2))
with open(tempfile2, 'r') as f:
assert_equal(f.read(), "LOAD")
assert_equal(stats(tempfile, times=False), stats(tempfile2, times=False))
unlink(tempfile2) # TODO: next two with_tempfile
| 36.740741 | 116 | 0.660007 | [
"MIT"
] | soichih/datalad | datalad/customremotes/tests/test_archives.py | 10,912 | Python |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import Table, MetaData
from migrate.changeset.constraint import UniqueConstraint
meta = MetaData()
CONSTRAINT_NAME = "unique_domain_name"
def upgrade(migrate_engine):
meta.bind = migrate_engine
# Load the database tables
domains_table = Table('domains', meta, autoload=True)
constraint = UniqueConstraint('name', 'deleted',
name=CONSTRAINT_NAME,
table=domains_table)
constraint.drop()
constraint = UniqueConstraint('name', 'deleted', 'pool_id',
name=CONSTRAINT_NAME,
table=domains_table)
constraint.create()
def downgrade(migrate_engine):
    meta.bind = migrate_engine
    # Load the database tables
    domains_table = Table('domains', meta, autoload=True)
constraint = UniqueConstraint('name', 'deleted', 'pool_id',
name=CONSTRAINT_NAME,
table=domains_table)
constraint.drop()
constraint = UniqueConstraint('name', 'deleted',
name=CONSTRAINT_NAME,
table=domains_table)
constraint.create()
| 33.4 | 75 | 0.641807 | [
"Apache-2.0"
] | infobloxopen/designate | designate/storage/impl_sqlalchemy/migrate_repo/versions/054_allow_duplicate_domains.py | 1,837 | Python |
from flask import Flask, jsonify, request, render_template, redirect, url_for
import datetime
from alarm import AlarmService
app = Flask(__name__, static_url_path='/static')
app.config.from_pyfile('config.py')
# status file for motion detection
motionfile = "motion.txt"
# enable temperature sensor (if library is available)
try:
    import w1thermsensor
    temperature = w1thermsensor.W1ThermSensor()
except ImportError:
    # library not installed
    temperature = None
except Exception:
    # library present but the sensor could not be initialised
    temperature = None
@app.route('/')
def index():
return render_template('/index.html',
webcam = app.config["WEBCAM"],
sensor = app.config["SENSOR"],
temperature = app.config["TEMPERATURE"],
alarm = app.config["ALARM"],
# alarmOn = True,
switches = {'A': app.config["SWITCH_A"],
'B': app.config["SWITCH_B"],
'C': app.config["SWITCH_C"]})
@app.route("/status", methods=["GET"])
def status():
import os, time
now = datetime.datetime.now()
timeString = now.strftime("%Y-%m-%d %H:%M")
details = {'time': timeString} # update time
# read temperature (if sensor is available)
if temperature and app.config["TEMPERATURE"]:
tempcelsius = temperature.get_temperature()
details.update({'temperature': tempcelsius})
# read status of PIR sensor
if os.path.exists(motionfile):
details['motion'] = time.ctime(os.path.getmtime(motionfile))
# take snapshot of webcam (max 1 per minute)
if app.config["WEBCAM"]:
import sh, os
timeString = now.strftime("%Y%m%d_%H%M")
snapshot = "snapshot-%s.jpg" % timeString
snapshot = os.path.join("static", "webcam", snapshot)
details['webcam'] = '/'+snapshot
snapshot = os.path.abspath(snapshot)
if not os.path.exists(snapshot):
sh.fswebcam("--title", "Home", "--save", snapshot)
return jsonify(**details)
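# Illustrative /status response (placeholder values):
#   {"time": "2020-01-01 12:00", "temperature": 21.5,
#    "motion": "Wed Jan  1 11:58:03 2020",
#    "webcam": "/static/webcam/snapshot-20200101_1200.jpg"}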
@app.route("/alarm/<on>", methods=["POST"])
def alarm(on):
details = {}
if turnAlarm(on=="on"):
now = datetime.datetime.now()
timeString = now.strftime("%Y-%m-%d %H:%M")
details.update({'time': timeString})
return jsonify(**details)
def turnAlarm(on):
"""write command into file - pass enable/disable command"""
print ("turn alarm on? %s" % on)
enable = on
disable = not on
alarm = AlarmService()
alarm.load()
alarm.save(enable, disable)
return True
@app.route("/turn/<switch>", methods=["POST"])
def turn(switch):
on = request.form['cmd']=="on"
details = {}
if turnSwitch(switch, on):
now = datetime.datetime.now()
timeString = now.strftime("%Y-%m-%d %H:%M")
details.update({'time': timeString})
return jsonify(**details)
def turnSwitch(switch, on=False):
from elro import RemoteSwitch
devices = { 'A': 1, 'B': 2, 'C': 4, 'D': 8, 'E':16 }
device=devices.get(switch, 0)
if not device:
return False
# print "turn switch '%s' to '%s' - device#%d" % (switch, "on" if on else "off", device)
device = RemoteSwitch(device,
key=app.config["ELRO_KEY"],
pin=app.config["ELRO_PIN"])
if on:
device.switchOn()
else:
device.switchOff()
return True
if __name__ == "__main__":
app.run(host='0.0.0.0',
port=app.config["HTTP_PORT"],
debug=app.config["DEBUG"])
| 32.842593 | 93 | 0.581054 | [
"MIT"
] | tommykoch/pyhome | web/home.py | 3,547 | Python |
"""Support for RFXtrx lights."""
import logging
import RFXtrx as rfxtrxmod
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from homeassistant.const import CONF_DEVICES, STATE_ON
from homeassistant.core import callback
from . import (
CONF_AUTOMATIC_ADD,
CONF_DATA_BITS,
CONF_SIGNAL_REPETITIONS,
DEFAULT_SIGNAL_REPETITIONS,
SIGNAL_EVENT,
RfxtrxCommandEntity,
get_device_id,
get_rfx_object,
)
from .const import COMMAND_OFF_LIST, COMMAND_ON_LIST
_LOGGER = logging.getLogger(__name__)
SUPPORT_RFXTRX = SUPPORT_BRIGHTNESS
async def async_setup_entry(
hass,
config_entry,
async_add_entities,
):
"""Set up config entry."""
discovery_info = config_entry.data
device_ids = set()
def supported(event):
return (
isinstance(event.device, rfxtrxmod.LightingDevice)
and event.device.known_to_be_dimmable
)
# Add switch from config file
entities = []
for packet_id, entity_info in discovery_info[CONF_DEVICES].items():
event = get_rfx_object(packet_id)
if event is None:
_LOGGER.error("Invalid device: %s", packet_id)
continue
if not supported(event):
continue
device_id = get_device_id(
event.device, data_bits=entity_info.get(CONF_DATA_BITS)
)
if device_id in device_ids:
continue
device_ids.add(device_id)
entity = RfxtrxLight(
event.device, device_id, entity_info[CONF_SIGNAL_REPETITIONS]
)
entities.append(entity)
async_add_entities(entities)
@callback
def light_update(event, device_id):
"""Handle light updates from the RFXtrx gateway."""
if not supported(event):
return
if device_id in device_ids:
return
device_ids.add(device_id)
_LOGGER.info(
"Added light (Device ID: %s Class: %s Sub: %s, Event: %s)",
event.device.id_string.lower(),
event.device.__class__.__name__,
event.device.subtype,
"".join(f"{x:02x}" for x in event.data),
)
entity = RfxtrxLight(
event.device, device_id, DEFAULT_SIGNAL_REPETITIONS, event=event
)
async_add_entities([entity])
# Subscribe to main RFXtrx events
if discovery_info[CONF_AUTOMATIC_ADD]:
hass.helpers.dispatcher.async_dispatcher_connect(SIGNAL_EVENT, light_update)
class RfxtrxLight(RfxtrxCommandEntity, LightEntity):
"""Representation of a RFXtrx light."""
_brightness = 0
async def async_added_to_hass(self):
"""Restore RFXtrx device state (ON/OFF)."""
await super().async_added_to_hass()
if self._event is None:
old_state = await self.async_get_last_state()
if old_state is not None:
self._state = old_state.state == STATE_ON
self._brightness = old_state.attributes.get(ATTR_BRIGHTNESS)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_RFXTRX
@property
def is_on(self):
"""Return true if device is on."""
return self._state
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
self._state = True
if brightness is None:
await self._async_send(self._device.send_on)
self._brightness = 255
else:
await self._async_send(self._device.send_dim, brightness * 100 // 255)
self._brightness = brightness
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._async_send(self._device.send_off)
self._state = False
self._brightness = 0
self.async_write_ha_state()
def _apply_event(self, event):
"""Apply command from rfxtrx."""
super()._apply_event(event)
if event.values["Command"] in COMMAND_ON_LIST:
self._state = True
elif event.values["Command"] in COMMAND_OFF_LIST:
self._state = False
elif event.values["Command"] == "Set level":
self._brightness = event.values["Dim level"] * 255 // 100
self._state = self._brightness > 0
@callback
def _handle_event(self, event, device_id):
"""Check if event applies to me and update."""
if device_id != self._device_id:
return
self._apply_event(event)
self.async_write_ha_state()
| 28.311765 | 84 | 0.633493 | [
"Apache-2.0"
] | 1e1/core-1 | homeassistant/components/rfxtrx/light.py | 4,813 | Python |
# TODO:
#
# A handler/formatter that can replace django.utils.log.AdminEmailHandler
#
# Needed:
# * A dump of locals() on each affected line in the stacktrace
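# Hedged sketch (not part of the original module): one possible way to get the
# locals dump described above. "LocalsDumpFormatter" is an invented name, not an
# existing humiologging API; treat this as an illustrative, untested outline.
import logging
class LocalsDumpFormatter(logging.Formatter):
    """Append a repr() of each traceback frame's locals to the exception text."""
    def formatException(self, exc_info):
        text = super().formatException(exc_info)
        frames = []
        tb = exc_info[2]
        while tb is not None:
            frame = tb.tb_frame
            frames.append("  %s:%d in %s locals=%r" % (
                frame.f_code.co_filename,
                tb.tb_lineno,
                frame.f_code.co_name,
                frame.f_locals,
            ))
            tb = tb.tb_next
        return text + "\nLocals per frame:\n" + "\n".join(frames)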
| 22.714286 | 73 | 0.742138 | [
"Apache-2.0"
] | Uninett/python-logging-humio | src/humiologging/handlers/django.py | 159 | Python |
import unittest
import parameterized
import numpy as np
from rlutil.envs.tabular_cy import q_iteration, tabular_env
from rlutil.envs.tabular_cy import q_iteration_py
class QIterationTest(unittest.TestCase):
def setUp(self):
self.num_states = 128
self.env = tabular_env.RandomTabularEnv(num_states=self.num_states, num_actions=3, transitions_per_action=2)
self.env_selfloop = tabular_env.RandomTabularEnv(num_states=self.num_states, num_actions=3, transitions_per_action=2, self_loop=True)
def test_num_states(self):
self.assertEqual(self.env.num_states, self.num_states)
def test_selfloop(self):
transitions = self.env_selfloop.transitions(2, 0)
self.assertEqual(len(transitions), 1)
self.assertEqual(transitions[2], 1.0)
transitions = self.env_selfloop.transitions(2, 1)
self.assertEqual(len(transitions), 2)
def test_num_transitions(self):
transitions = self.env.transitions(0, 0)
self.assertEqual(len(transitions), 2)
for ns in transitions:
self.assertAlmostEqual(transitions[ns], 0.5)
def test_random_rollout(self):
self.env.reset()
for _ in range(30):
#self.env.render()
self.env.step(np.random.randint(0, self.env.num_actions))
def test_q_iteration(self):
params = {
'num_itrs': 1000,
'ent_wt': 0.0,
'discount': 0.95,
}
qvals = q_iteration.softq_iteration(self.env, **params)
self.env.reset()
rews = 0
for _ in range(200):
#self.env_small.render()
a_qvals = qvals[self.env.get_state()]
_, rew, _, _ = self.env.step(np.argmax(a_qvals))
rews += rew
self.assertGreater(rews, 0.0)
if __name__ == '__main__':
unittest.main()
| 33.071429 | 141 | 0.646868 | [
"MIT"
] | alexlioralexli/diagnosing_qlearning | rlutil/envs/tabular_cy/test_random_env.py | 1,852 | Python |
import json
from pathlib import Path
from typing import List
import criticus.py.edit_settings as es
def get_file(filename):
with open(filename, 'r', encoding='utf-8') as f:
text = f.readlines()
return text
def get_info_from_filename(filename):
filename = filename.split('/')[-1]
f = filename.split('_')
siglum = f[0]
reference_prefix = f[1].replace('.txt', '')
return siglum, reference_prefix
def format_reference(line: List[str], reference_prefix: str):
if reference_prefix[-1].isdigit():
seperator = '.'
else:
seperator = ''
verse = line.pop(0)
reference = verse.replace(':', '.')
reference = f'{reference_prefix}{seperator}{reference}'
return line, reference, verse
def convert_this_line(verse: str, verse_from, verse_to) -> bool:
if verse.replace(':', '.') in [verse_from.replace(':', '.'), verse_to.replace(':', '.')]:
return True
else:
return False
def build_token(word: str, index: str, siglum: str) -> dict:
return {
'index': index,
'siglum': siglum,
'reading': siglum,
'original': word,
'rule_match': [word],
't': word
}
def build_witnesses(siglum: str, tokens: List[dict]):
return [{
'id': siglum,
'tokens': tokens
}]
def build_json(siglum, reference, witnesses: List[dict], line: list):
return {
'id': siglum,
'_id': siglum,
'transcription': siglum,
'transcription_siglum': siglum,
'siglum': siglum,
'context': reference,
'n': reference,
'text': ' '.join(line),
'witnesses': witnesses
}
def make_tokens(line, siglum) -> List[dict]:
tokens = []
for i, word in enumerate(line, 1):
index = f'{i*2}'
token = build_token(word, index, siglum)
tokens.append(token)
return tokens
def save_json(to_save: dict, reference: str, output_dir: str):
with open(f'{output_dir}/{reference}.json', 'w', encoding='utf-8') as f:
json.dump(to_save, f, ensure_ascii=False, indent=4)
def save_metadata(siglum, output_dir):
metadata = {'_id': siglum, 'siglum': siglum, 'id': siglum}
with open(f'{output_dir}/metadata.json', 'w', encoding='utf-8') as f:
json.dump(metadata, f, ensure_ascii=False)
def construct_json_transcription(line: List[str], reference: str, siglum: str, output_dir):
tokens = make_tokens(line, siglum)
    witnesses = build_witnesses(siglum, tokens)
complete_json = build_json(siglum, reference, witnesses, line)
save_json(complete_json, reference, output_dir)
def check_and_save_dirs(output_dir, siglum):
output_dir = Path(f'{output_dir}/{siglum}')
if not output_dir.exists():
Path.mkdir(output_dir, parents=True)
es.edit_settings('output_dir', output_dir.parent.absolute().as_posix())
return output_dir.absolute().as_posix()
def convert_text_to_json(
filename, output_dir, convert_all: bool,
reference_prefix: str, auto: bool, verse_from: str=None,
verse_to: str=None, siglum: str=None
):
filename = Path(filename).as_posix()
if auto:
siglum, reference_prefix = get_info_from_filename(filename)
output_dir = check_and_save_dirs(output_dir, siglum)
save_metadata(siglum, output_dir)
text = get_file(filename)
capture = False
for line in text:
line = line.split()
if len(line) > 1 and line[0][0].isdigit() and line[0][-1].isdigit(): # check that line contains a reference and a text unit and is not a heading
line, reference, verse = format_reference(line, reference_prefix)
if not convert_all and verse_to != verse_from: # handle a range of text units to convert
if convert_this_line(verse, verse_from, verse_to):
capture = not capture
construct_json_transcription(line, reference, siglum, output_dir)
elif capture:
construct_json_transcription(line, reference, siglum, output_dir)
elif not convert_all and verse_from == verse_to: # handle a single text unit
if convert_this_line(verse, verse_from, verse_to):
construct_json_transcription(line, reference, siglum, output_dir)
else:
construct_json_transcription(line, reference, siglum, output_dir) # handle all text units
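# Hypothetical usage (the file name, siglum and prefix below are invented examples):
#   convert_text_to_json('05_B04K.txt', 'output', convert_all=True,
#                        reference_prefix='', auto=True)
# With auto=True the siglum and reference prefix are parsed from the file name
# ("<siglum>_<prefix>.txt"); pass auto=False with siglum=... and
# reference_prefix=... to set them explicitly, and convert_all=False with
# verse_from/verse_to to convert only a range of verses.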
| 35.629032 | 152 | 0.640109 | [
"MIT"
] | d-flood/Criticus | criticus/py/txt2json/convert_text_to_json.py | 4,418 | Python |
"""
Settings file, which is populated from the environment while enforcing common
use-case defaults.
"""
import os
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), '.env')
# verbose=True makes python-dotenv report which variables it loads.
load_dotenv(dotenv_path, verbose=True)
DEBUG = True
if os.getenv('DEBUG', '').lower() in ['0', 'no', 'false']:
DEBUG = False
API_BIND_HOST = os.getenv('SERVICE_API_HOST', '0.0.0.0')
API_BIND_PORT = int(os.getenv('SERVICE_API_PORT', 8080))
SERVICE_NAME = os.getenv('SERVICE_NAME', 'app')
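# Illustrative .env contents for the variables read above (example values only):
#   DEBUG=false
#   SERVICE_API_HOST=127.0.0.1
#   SERVICE_API_PORT=9090
#   SERVICE_NAME=client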
| 27.045455 | 81 | 0.72437 | [
"MIT"
] | theodesp/flask-golang-grpc-example | src/client/settings.py | 595 | Python |
# -*- coding: utf8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pylero.base_polarion import BasePolarion
class Change(BasePolarion):
"""Object to handle the Polarion WSDL tns3:Change class
Attributes:
creation (boolean)
date (dateTime)
diffs (ArrayOf_tns3_FieldDiff)
empty (boolean)
invalid (boolean)
revision (string)
user (string)
"""
_cls_suds_map = {"creation": "creation",
"date": "date",
"diffs": "diffs",
"empty": "empty",
"invalid": "invalid",
"revision": "revision",
"user": "user",
"uri": "_uri",
"_unresolved": "_unresolved"}
_obj_client = "tracker_client"
_obj_struct = "tns3:Change"
| 29.121212 | 59 | 0.556712 | [
"MIT"
] | RedHatQE/pylero | src/pylero/change.py | 961 | Python |
import tabulate
import matplotlib.pyplot as plt
import math
import numpy as np
Data=[]
data=[]
x_axis=[]
y_axis=[]
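# Regula falsi (false position): given a bracket [a, b] with f(a)*f(b) < 0,
# each step takes the x-intercept of the secant through (a, f(a)) and (b, f(b)),
#     x = (a*f(b) - b*f(a)) / (f(b) - f(a)),
# keeps the sub-interval where f still changes sign, and repeats until the
# stopping tolerance is met.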
def falsi(func, a, b, error_accept):
    def f(x):
        # evaluate the function string at x (x is used inside eval)
        return eval(func)
    error = b - a
    # a valid bracket needs a < b and a sign change of f over [a, b]
    if f(a)*f(b) >= 0 or a >= b:
        print("wrong a,b")
        return
    n = 0
while error>=error_accept:
data.append(n)
n=n+1
data.append(str(a))
data.append(str(b))
        # false-position update: x-intercept of the secant through (a, f(a)) and (b, f(b))
        x = (a*f(b) - b*f(a)) / (f(b) - f(a))
data.append(str(x))
x_axis.append(x)
data.append(str(f(x)))
y_axis.append(f(x))
if f(x) == 0:
break
elif f(x) * f(b) < 0:
a = x
elif f(a) * f(x) < 0:
b = x
c=data.copy()
Data.append(c)
        # the retained endpoint may never move, so |b - a| can stall above the
        # tolerance; use the residual |f(x)| as the stopping test instead
        error = abs(f(x))
data.clear()
print("The root is %0.4f"%x)
falsi('3*x-math.cos(x)-1', 0, 1, 0.001)
print(tabulate.tabulate(Data,headers=['n','a','b','x','f(x)'],tablefmt='fancy_grid'))
x_axis=np.array(x_axis)
y_axis=np.array(y_axis)
plt.style.use('seaborn')
plt.plot(x_axis,y_axis,marker='*',color='deepskyblue',label=('f(x)'),linewidth='0.5')
plt.legend()
plt.title('Regular Falsi Method')
plt.xlabel('possible root values x',color='r')
plt.ylabel('function output (f(x))',color='r')
plt.show()
| 22.745763 | 86 | 0.517139 | [
"MIT"
] | Shakil-Mahmud-Programmer/Regular-Falsi-Method-or-False-Position-Method | 1st_formula.py | 1,342 | Python |
from .LocalDispatcher import LocalDispatcher, LocalDispatcherExecutionError
| 38 | 75 | 0.907895 | [
"MIT"
] | chilopodaHQ/chilopoda | src/lib/kombi/TaskHolder/Dispatcher/Local/__init__.py | 76 | Python |
import json
from django.core.urlresolvers import reverse
from rest_framework.compat import patterns, url
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework.test import APITestCase
from fluent_contents.models import Placeholder
from bluebottle.test.factory_models.pages import PageFactory
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
class PageTestCase(APITestCase):
"""
Base class for test cases for ``page`` module.
The testing classes for ``page`` module related to the API must
subclass this.
"""
def setUp(self):
self.user = BlueBottleUserFactory.create()
self.page1 = PageFactory.create(author=self.user, language = 'nl')
placeholder1 = Placeholder.objects.create_for_object(self.page1, 'blog_contents')
placeholder1.save()
self.page2 = PageFactory.create(author=self.user, language = 'en')
placeholder2 = Placeholder.objects.create_for_object(self.page2, 'blog_contents')
class PageListTestCase(PageTestCase):
"""
Test case for ``PageList`` API view.
Endpoint: /api/pages/<language>/pages
"""
def test_api_pages_list_success(self):
"""
Ensure get request returns 200.
"""
response = self.client.get(reverse('page_list', kwargs={'language': 'nl'}))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_api_pages_list_content(self):
"""
Ensure get request returns record with correct data.
"""
response = self.client.get(reverse('page_list', kwargs={'language': 'nl'}))
page = response.data['results'][0]
self.assertEqual(page['title'], self.page1.title)
self.assertEqual(page['language'], self.page1.language)
self.assertEqual(page['body'], '<!-- no items in placeholder \'blog_contents\' -->')
self.assertEqual(page['full_page'], self.page1.full_page)
class PageDetailTestCase(PageTestCase):
"""
Test case for ``PageDetail`` API view.
Endpoint: /api/pages/<language>/pages/<slug>
"""
def test_api_pages_detail_content(self):
"""
Ensure get request returns record with correct data.
"""
response = self.client.get(reverse('page_detail', kwargs={'language': 'en', 'slug': self.page2.slug}))
results = response.data
self.assertEqual(results['title'], self.page2.title)
self.assertEqual(results['language'], self.page2.language)
self.assertEqual(results['body'], '<!-- no items in placeholder \'blog_contents\' -->')
self.assertEqual(results['full_page'], self.page2.full_page) | 33.618421 | 104 | 0.749511 | [
"BSD-3-Clause"
] | maykinmedia/bluebottle | bluebottle/pages/tests/test_api.py | 2,555 | Python |
import wx
import wx.aui
from . import UIManager
from . import UIControllerObject
from . import UIViewObject
from . import MainWindowController
class ToolBarController(UIControllerObject):
tid = 'toolbar_controller'
_singleton_per_parent = True
_ATTRIBUTES = {
'id': {'default_value': wx.ID_ANY,
'type': int
},
'pos': {'default_value': wx.DefaultPosition,
'type': wx.Point
},
'size': {'default_value': wx.DefaultSize,
'type': wx.Size
},
'style': {'default_value': wx.TB_FLAT|wx.TB_NODIVIDER,
'type': int
}
}
def __init__(self, **state):
super().__init__(**state)
class ToolBar(UIViewObject, wx.ToolBar):
tid = 'toolbar'
paneinfo = wx.aui.AuiPaneInfo().Name(tid).ToolbarPane().Top()
def __init__(self, controller_uid):
UIViewObject.__init__(self, controller_uid)
_UIM = UIManager()
controller = _UIM.get(self._controller_uid)
parent_controller_uid = _UIM._getparentuid(self._controller_uid)
parent_controller = _UIM.get(parent_controller_uid)
#wx.SystemOptions.SetOption("msw.remap", '0')
wx.ToolBar.__init__(self, parent_controller.view, controller.id,
controller.pos,
controller.size, controller.style
)
self.Realize()
if isinstance(parent_controller, MainWindowController):
mgr = wx.aui.AuiManager.GetManager(parent_controller.view)
mgr.AddPane(self, self.paneinfo)
mgr.Update()
| 30.636364 | 73 | 0.583383 | [
"Apache-2.0"
] | adrianopls/UIManager | tool_bar.py | 1,685 | Python |