# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
import uuid

from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
import six

from sahara import conductor as c
from sahara import context
from sahara import exceptions
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.plugins import base as plugin_base
from sahara.service.edp import job_manager
from sahara.service import trusts
from sahara.utils import general as g
from sahara.utils import remote
from sahara.utils import rpc as rpc_utils


conductor = c.API
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

INFRA = None


def setup_ops(engine):
    global INFRA

    INFRA = engine


class LocalOps(object):
    def provision_cluster(self, cluster_id):
        context.spawn("cluster-creating-%s" % cluster_id,
                      _provision_cluster, cluster_id)

    def provision_scaled_cluster(self, cluster_id, node_group_id_map):
        context.spawn("cluster-scaling-%s" % cluster_id,
                      _provision_scaled_cluster, cluster_id,
                      node_group_id_map)

    def terminate_cluster(self, cluster_id):
        context.spawn("cluster-terminating-%s" % cluster_id,
                      terminate_cluster, cluster_id)

    def run_edp_job(self, job_execution_id):
        context.spawn("Starting Job Execution %s" % job_execution_id,
                      _run_edp_job, job_execution_id)

    def cancel_job_execution(self, job_execution_id):
        context.spawn("Canceling Job Execution %s" % job_execution_id,
                      _cancel_job_execution, job_execution_id)

    def delete_job_execution(self, job_execution_id):
        context.spawn("Deleting Job Execution %s" % job_execution_id,
                      _delete_job_execution, job_execution_id)

    def get_engine_type_and_version(self):
        return INFRA.get_type_and_version()


class RemoteOps(rpc_utils.RPCClient):
    def __init__(self):
        target = messaging.Target(topic='sahara-ops', version='1.0')
        super(RemoteOps, self).__init__(target)

    def provision_cluster(self, cluster_id):
        self.cast('provision_cluster', cluster_id=cluster_id)

    def provision_scaled_cluster(self, cluster_id, node_group_id_map):
        self.cast('provision_scaled_cluster', cluster_id=cluster_id,
                  node_group_id_map=node_group_id_map)

    def terminate_cluster(self, cluster_id):
        self.cast('terminate_cluster', cluster_id=cluster_id)

    def run_edp_job(self, job_execution_id):
        self.cast('run_edp_job', job_execution_id=job_execution_id)

    def cancel_job_execution(self, job_execution_id):
        self.cast('cancel_job_execution', job_execution_id=job_execution_id)

    def delete_job_execution(self, job_execution_id):
        self.cast('delete_job_execution', job_execution_id=job_execution_id)

    def get_engine_type_and_version(self):
        return self.call('get_engine_type_and_version')


def request_context(func):
    @functools.wraps(func)
    def wrapped(self, ctx, *args, **kwargs):
        context.set_ctx(context.Context(**ctx))
        return func(self, *args, **kwargs)
    return wrapped


class OpsServer(rpc_utils.RPCServer):
    def __init__(self):
        target = messaging.Target(topic='sahara-ops',
                                  server=uuid.uuid4(),
                                  version='1.0')
        super(OpsServer, self).__init__(target)

    @request_context
    def provision_cluster(self, cluster_id):
        _provision_cluster(cluster_id)

    @request_context
    def provision_scaled_cluster(self, cluster_id, node_group_id_map):
        _provision_scaled_cluster(cluster_id, node_group_id_map)

    @request_context
    def terminate_cluster(self, cluster_id):
        terminate_cluster(cluster_id)

    @request_context
    def run_edp_job(self, job_execution_id):
        _run_edp_job(job_execution_id)

    @request_context
    def cancel_job_execution(self, job_execution_id):
        _cancel_job_execution(job_execution_id)

    @request_context
    def delete_job_execution(self, job_execution_id):
        _delete_job_execution(job_execution_id)

    @request_context
    def get_engine_type_and_version(self):
        return INFRA.get_type_and_version()


def ops_error_handler(description):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(cluster_id, *args, **kwds):
            ctx = context.ctx()
            try:
                # Clearing status description before executing
                g.change_cluster_status_description(cluster_id, "")
                f(cluster_id, *args, **kwds)
            except Exception as ex:
                # something happened during cluster operation
                cluster = conductor.cluster_get(ctx, cluster_id)
                # check if cluster still exists (it might have been removed)
                if cluster is None or cluster.status == 'Deleting':
                    LOG.debug(
                        "Cluster id={id} was deleted or marked for "
                        "deletion. Canceling current operation.".format(
                            id=cluster_id))
                    return

                msg = six.text_type(ex)
                LOG.error(
                    _LE("Error during operating on cluster {name} (reason: "
                        "{reason})").format(name=cluster.name, reason=msg))

                try:
                    # trying to rollback
                    desc = description.format(reason=msg)
                    if _rollback_cluster(cluster, ex):
                        g.change_cluster_status(cluster, "Active", desc)
                    else:
                        g.change_cluster_status(cluster, "Error", desc)
                except Exception as rex:
                    cluster = conductor.cluster_get(ctx, cluster_id)
                    # check if cluster still exists (it might have been
                    # removed during rollback)
                    if cluster is None or cluster.status == 'Deleting':
                        LOG.debug(
                            "Cluster id={id} was deleted or marked for "
                            "deletion. Canceling current operation."
                            .format(id=cluster_id))
                        return

                    LOG.error(
                        _LE("Error during rollback of cluster {name} (reason:"
                            " {reason})").format(name=cluster.name,
                                                 reason=six.text_type(rex)))
                    desc = "{0}, {1}".format(msg, six.text_type(rex))
                    g.change_cluster_status(
                        cluster, "Error", description.format(reason=desc))
        return wrapper
    return decorator


def _rollback_cluster(cluster, reason):
    context.set_step_type(_("Engine: rollback cluster"))
    return INFRA.rollback_cluster(cluster, reason)


def _prepare_provisioning(cluster_id):
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)

    for nodegroup in cluster.node_groups:
        update_dict = {}
        update_dict["image_username"] = INFRA.get_node_group_image_username(
            nodegroup)
        conductor.node_group_update(ctx, nodegroup, update_dict)

    cluster = conductor.cluster_get(ctx, cluster_id)

    return ctx, cluster, plugin


def _update_sahara_info(ctx, cluster):
    sahara_info = {
        'infrastructure_engine': INFRA.get_type_and_version(),
        'remote': remote.get_remote_type_and_version()}

    return conductor.cluster_update(
        ctx, cluster, {'sahara_info': sahara_info})


@ops_error_handler(
    _("Creating cluster failed for the following reason(s): {reason}"))
def _provision_cluster(cluster_id):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    cluster = _update_sahara_info(ctx, cluster)

    if CONF.use_identity_api_v3 and cluster.is_transient:
        trusts.create_trust_for_cluster(cluster)

    # updating cluster infra
    cluster = g.change_cluster_status(cluster, "InfraUpdating")
    plugin.update_infra(cluster)

    # creating instances and configuring them
    cluster = conductor.cluster_get(ctx, cluster_id)
    context.set_step_type(_("Engine: create cluster"))
    INFRA.create_cluster(cluster)

    # configure cluster
    cluster = g.change_cluster_status(cluster, "Configuring")
    context.set_step_type(_("Plugin: configure cluster"))
    plugin.configure_cluster(cluster)

    # starting prepared and configured cluster
    cluster = g.change_cluster_status(cluster, "Starting")
    context.set_step_type(_("Plugin: start cluster"))
    plugin.start_cluster(cluster)

    # cluster is now up and ready
    cluster = g.change_cluster_status(cluster, "Active")

    # schedule execution pending job for cluster
    for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
        job_manager.run_job(je.id)


@ops_error_handler(
    _("Scaling cluster failed for the following reason(s): {reason}"))
def _provision_scaled_cluster(cluster_id, node_group_id_map):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    # Decommissioning surplus nodes with the plugin
    cluster = g.change_cluster_status(cluster, "Decommissioning")

    instances_to_delete = []
    for node_group in cluster.node_groups:
        new_count = node_group_id_map[node_group.id]
        if new_count < node_group.count:
            instances_to_delete += node_group.instances[new_count:
                                                        node_group.count]

    if instances_to_delete:
        context.set_step_type(_("Plugin: decommission cluster"))
        plugin.decommission_nodes(cluster, instances_to_delete)

    # Scaling infrastructure
    cluster = g.change_cluster_status(cluster, "Scaling")
    context.set_step_type(_("Engine: scale cluster"))
    instance_ids = INFRA.scale_cluster(cluster, node_group_id_map)

    # Setting up new nodes with the plugin
    if instance_ids:
        cluster = g.change_cluster_status(cluster, "Configuring")
        instances = g.get_instances(cluster, instance_ids)
        context.set_step_type(_("Plugin: scale cluster"))
        plugin.scale_cluster(cluster, instances)

    g.change_cluster_status(cluster, "Active")


@ops_error_handler(
    _("Terminating cluster failed for the following reason(s): {reason}"))
def terminate_cluster(cluster_id):
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)

    context.set_step_type(_("Plugin: shutdown cluster"))
    plugin.on_terminate_cluster(cluster)

    context.set_step_type(_("Engine: shutdown cluster"))
    INFRA.shutdown_cluster(cluster)

    if CONF.use_identity_api_v3:
        trusts.delete_trust_from_cluster(cluster)

    conductor.cluster_destroy(ctx, cluster)


def _run_edp_job(job_execution_id):
    job_manager.run_job(job_execution_id)


def _cancel_job_execution(job_execution_id):
    job_manager.cancel_job(job_execution_id)


def _delete_job_execution(job_execution_id):
    try:
        job_execution = job_manager.cancel_job(job_execution_id)
        if not job_execution:
            # job_execution was deleted already, nothing to do
            return
    except exceptions.CancelingFailed:
        LOG.error(_LE("Job execution {j_id} can't be cancelled in time. "
                      "Deleting it anyway.").format(j_id=job_execution_id))
    conductor.job_execution_destroy(context.ctx(), job_execution_id)
import sys
import operator
import numpy as np
import matplotlib.pyplot as plt
import itertools, functools
import re
import argparse

from mdp_builder import Builder

"""
Grid Layout
grid[0][0] = num_states
grid[0][1] = num_actions
"""


def load_args():
    parser = argparse.ArgumentParser(description='Description of your program')
    parser.add_argument('-t', '--timesteps', default=0, help='horizon length, discarded if discount provided', required=False)
    parser.add_argument('-g', '--gamma', default=0, help='discount factor', required=False)
    parser.add_argument('-f', '--input_file', default='MDP1.txt', help='input file with MDP description')
    parser.add_argument('-e', '--epsilon', default=0, help='epsilon, or early stopping conditions', required=False)
    parser.add_argument('-i', '--intermediate', default=False, type=bool, help='print out intermediate policies/value functions while it learns', required=False)
    parser.add_argument('-b', '--build', default=False, type=bool, help='use the parking lot planner')
    parser.add_argument('-s', '--spaces', default=8, type=int, help='number of spaces in each row of the parking lot')
    parser.add_argument('-c', '--rcrash', default=-10, type=int, help='penalty on crashing into another car')
    parser.add_argument('-d', '--rdisabled', default=-5, type=int, help='penalty on parking in a handicapped spot')
    parser.add_argument('-r', '--run_trial', default=False, type=bool, help='try policy with a given starting position')
    args = parser.parse_args()
    return args


def load_data(path):
    with open(path, 'rb') as f:
        train = f.readlines()
    train = [line.strip('\n') for line in train]
    train = [re.sub(r'[^\x00-\x7f]', r'', line) for line in train]
    train[0] = [int(a) for a in train[0].split(' ')]
    num_states, num_actions = train[0]
    print num_states, num_actions
    lines = num_actions * num_states + num_actions
    grid = []
    for i in range(1, lines+(num_actions-1)):
        if ((i-1) % (num_states+1) != 0) and (len(train)-1 >= i):
            if train[i] == "":
                pass
            else:
                print "line: ", train[i]
                grid.append([float(n) for n in train[i].split(' ')])
                train[i] = [float(n) for n in train[i].split(' ')]
    """
    start = 1
    for i in range(num_actions):
        start = (i*num_states)+1
        print "taking input from lines: {} to {}".format(start, (i+1)*num_states+2)
        for k in range(start, (i+1)*num_states+2):
            print "line ", k, " : ", train[k]
            if k == start:
                g = 1
            else:
                grid.append([float(n) for n in train[k].split(' ')])
                train[k] = [float(n) for n in train[k].split(' ')]
    """
    print len(train)
    actions = []
    for i in range(num_actions):
        actions.append(grid[(i*num_states):((1+i)*num_states)])
    train = np.array(train)
    return train, actions


def build_mdp(args):
    builder = Builder(args.spaces, 0, args.rcrash, args.rdisabled, './parking.txt')
    builder.build()


class MDP(object):

    def __init__(self, args, grid, actions):
        self.args = args
        self.grid = grid
        self.gamma = float(args.gamma)
        self.num_states, self.num_actions = grid[0]
        self.actions = actions
        self.rewards = grid[-1]
        if type(self.rewards) is str:
            self.rewards = self.rewards.split(' ')
            self.rewards = map(float, self.rewards)
        self.Utility = [x for x in self.rewards]
        self.print_attrs()
        self.timesteps = int(args.timesteps)
        if (args.epsilon == 0) and (self.gamma > 0):
            self.epsilon = ((1*10**-10)*((1-self.gamma)**2))/(2*(self.gamma**2))
        else:
            self.epsilon = float(args.epsilon)

    def print_attrs(self):
        print "number of states: {}\n".format(self.num_states)
        print "number of possible actions: {}".format(self.num_actions)
        print "rewards per state: {}".format(self.rewards)
        print "grid: {}".format(len(self.grid))

    def Reward(self, state, action=None):
        return self.rewards[state]

    def T(self, state, action, next_state):
        """ returns probability of going to state X from state Y """
        return self.actions[action][state][next_state]

    """
    Value Iteration algorithm:
    U1(state) = Reward(state)
    Ui+1(state) = Reward(state) + gamma*max(for all next states (T(state, action, next_state)*U(i)(next_state)))
    computes the utility of each state when considering all next states
    """
    def util(self, state):
        p_actions = []
        max_p, sum_p = 0, 0
        for action in range(self.num_actions):
            sum_p = 0
            p_actions = []
            for next_state in range(self.num_states):
                p_actions.append((self.T(state, action, next_state), action, next_state))
            for p in p_actions:
                sum_p += p[0] * self.Utility[p[2]]
            if (sum_p > max_p) or (max_p == 0):
                max_p = sum_p
        if self.timesteps > 0:
            return max_p + self.Reward(state)
        else:
            return self.gamma*max_p + self.Reward(state)

    """
    Q iterates through the algorithm until the utility update is less than delta.
    As the utility of each state is updated, the difference between the old and
    the new utility functions is taken and compared against the stopping threshold.
    """
    def Q(self):
        max_state = 1
        if self.timesteps == 0:
            print "searching infinite horizon"
            while max_state > self.epsilon:
                max_state = 0
                new_util = [0]*self.num_states
                next_prob = []
                for state in range(self.num_states):
                    state_util = self.util(state)
                    if state_util is not None:
                        max_state = max(max_state, abs(self.Utility[state] - state_util))
                        new_util[state] = state_util
                self.Utility = new_util
        else:
            print "searching on finite horizon"
            # for finite horizon
            utilities, policies = [], []
            for it in range(self.timesteps):
                for s in range(it):
                    new_util = [0]*self.num_states
                    next_prob = []
                    for state in range(self.num_states):
                        state_util = self.util(state)
                        if state_util is not None:
                            max_state = max(max_state, abs(self.Utility[state] - state_util))
                            new_util[state] = state_util
                    self.Utility = new_util
                if self.args.intermediate:
                    print "INTERMEDIATE\n\n"
                utilities.append(self.Utility)
                policies.append(self.policy())
            if self.args.intermediate:
                return utilities, policies
            else:
                return self.Utility, self.policy()
        return self.Utility

    """
    finds the best policy based on the current utility function
    simply returns the best next state: next state with the highest utility
    """
    def policy(self):
        proto_policy = []

        def argmax(state):
            res = {}
            for action in range(self.num_actions):
                res[action] = 0
                self.p_states = []
                for next_state in range(self.num_states):
                    self.p_states.append((self.T(state, action, next_state), action, next_state))
                for p in self.p_states:
                    res[action] += p[0] * self.Utility[p[2]]
            return (max(res.items(), key=operator.itemgetter(1))[0] if res else None)

        for state in range(self.num_states):
            proto_policy.append(argmax(state))
        return proto_policy

    def run_trial(self, U, P, start):
        state = start
        reward = 0
        i = 0
        for action in P:
            print "STEP {}".format(i)
            if int(float(action)) == 0:  # park
                reward += self.rewards[state]
                print "TAKING EXIT\n\nReward of {}\n\nDONE".format(reward)
                return
            if int(float(action)) == 1:
                print "EXITING\n\nReward of {}\n\nDONE".format(reward)
                return
            if int(float(action)) == 2:
                reward -= 1
                if state < 40:
                    state += 3
                else:
                    state = 0
            i += 1


if __name__ == '__main__':
    args = load_args()
    if args.build == True:
        build_mdp(args)
        sys.exit(0)
    grid, actions = load_data(args.input_file)
    mdp = MDP(args, grid, actions)
    if int(args.timesteps) > 0:
        finite = True
    else:
        finite = False
    if finite is False:
        Utility = mdp.Q()
        Policy = mdp.policy()
        U = ["%.5f" % v for v in Utility]
        P = ["%.5f" % v for v in Policy]
        print "**************************************\nPolicy: {}\nValue : {}\n**************************************".format(P, U)
        if args.run_trial:
            start = 1
            print "\n\nRUNNING POLICY STARTING AT STATE {}".format(start)
            mdp.run_trial(U, P, start)
    else:
        print "***********************************"
        Utility, Policy = mdp.Q()
        if args.intermediate:
            for i in range(int(args.timesteps)):
                U = ["%.5f" % v for v in Utility[i]]
                print U
            for i in range(int(args.timesteps)):
                P = ["%.5f" % v for v in Policy[i]]
                print P
        else:
            U = ["%.5f" % v for v in Utility]
            P = ["%.5f" % v for v in Policy]
            print "Finite Utility : {}".format(U)
            print "Finite Policy : {}\n".format(P)
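# Minimal, self-contained sketch of the value-iteration update that
# MDP.util()/MDP.Q() above implement:
#     U_{k+1}(s) = R(s) + gamma * max_a sum_{s'} T(s, a, s') * U_k(s')
# The 2-state, 2-action transition matrices and rewards below are made-up
# illustration data, not taken from MDP1.txt or parking.txt.

import numpy as np

T = np.array([                      # T[a][s][s'] : transition probabilities
    [[0.9, 0.1], [0.2, 0.8]],       # action 0
    [[0.5, 0.5], [0.0, 1.0]],       # action 1
])
R = np.array([0.0, 1.0])            # reward per state
gamma, epsilon = 0.9, 1e-6

U = R.copy()
while True:
    # Bellman backup: expected utility of the best action in each state
    U_new = R + gamma * np.max(T @ U, axis=0)
    if np.max(np.abs(U_new - U)) < epsilon:
        break
    U = U_new

policy = np.argmax(T @ U, axis=0)   # greedy policy w.r.t. converged utilities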
from __future__ import print_function import sys import re from collections import defaultdict if sys.version_info.major > 2: # alias dict.items() as dict.iteritems() in python 3+ class compat_dict(defaultdict): pass compat_dict.iteritems = defaultdict.items defaultdict = compat_dict # add xrange to python 3+ xrange = range graphviz_items = [] vindex_count = -1 def new_index(): global vindex_count vindex_count += 1 return vindex_count def init(random_seed=None): pass class SimpleConcreteDim(object): def __init__(self, nrows, ncols, inferred): self.nrows = nrows self.ncols = ncols self.inferred = inferred def __getitem__(self, key): return [self.nrows, self.ncols][key] def __iter__(self): return iter([self.nrows, self.ncols]) def __str__(self): return 'Dim(%s,%s)' % (self.nrows, self.ncols) def __eq__(self, other): return isinstance(other, SimpleConcreteDim) and self.nrows==other.nrows and self.ncols==other.ncols def __ne__(self, other): return not self==other def __hash__(self): return hash((self.nrows, self.ncols)) def isvalid(self): return True def invalid(self): return False class InvalidConcreteDim(object): def __init__(self, a_dim=None, b_dim=None): self.a_dim = a_dim self.b_dim = b_dim def __getitem__(self, key): return None def __repr__(self): if self.a_dim is None and self.b_dim is None: return 'InvalidDim' else: return 'InvalidDim(%s, %s)' % (self.a_dim, self.b_dim) def __str__(self): return repr(self) def isvalid(self): return False def invalid(self): return True InvalidDim = InvalidConcreteDim() def make_dim(a, b=None, inferred=False): if isinstance(a, InvalidConcreteDim): assert b is None return a elif isinstance(a, SimpleConcreteDim): assert b is None return SimpleConcreteDim(a.nrows, a.ncols, inferred) elif isinstance(a, tuple): assert b is None assert len(a) == 2, str(a) (nrows, ncols) = a return SimpleConcreteDim(nrows, ncols, inferred) elif b is None: assert isinstance(a, int) or (isinstance(a, float) and int(a) == a) return SimpleConcreteDim(a, 1, inferred) else: assert isinstance(a, int) or (isinstance(a, float) and int(a) == a) assert isinstance(b, int) or (isinstance(b, float) and int(b) == b) return SimpleConcreteDim(a, b, inferred) def ensure_freshness(a): if a.cg_version != _cg.version(): raise ValueError("Attempt to use a stale expression.") def copy_dim(a): if a.dim.isvalid(): return make_dim(a.dim, inferred=True) else: return InvalidDim def ensure_same_dim(a,b): if a.dim.invalid() or b.dim.invalid(): return InvalidDim elif a.dim==b.dim: return copy_dim(a) else: return InvalidConcreteDim(a.dim,b.dim) def ensure_mul_dim(a,b): if a.dim.invalid() or b.dim.invalid(): return InvalidDim elif a.dim[1]==b.dim[0]: return make_dim(a.dim[0], b.dim[1], inferred=True) else: return InvalidConcreteDim(a.dim,b.dim) def ensure_all_same_dim(xs): for x in xs: if x.dim.invalid(): return InvalidDim dim0 = xs[0].dim for x in xs[1:]: if dim0 != x.dim: return InvalidConcreteDim(dim0, x.dim) return copy_dim(xs[0]) def _add(a, b): return GVExpr('add', [a,b], ensure_same_dim(a,b)) def _mul(a, b): return GVExpr('mul', [a,b], ensure_mul_dim(a,b)) def _neg(a): return GVExpr('neg', [a], copy_dim(a)) def _scalarsub(a, b): return GVExpr('scalarsub', [a,b], copy_dim(b)) def _cadd(a, b): return GVExpr('cadd', [a,b], copy_dim(a)) def _cmul(a, b): return GVExpr('cmul', [a,b], copy_dim(a)) def _cdiv(a, b): return GVExpr('cdiv', [a,b], copy_dim(a)) class Expression(object): #{{{ def __init__(self, name, args, dim): self.name = name self.args = args self.dim = dim self.vindex = new_index() 
self.cg_version = cg().version() def cg(self): return cg() def get_cg_version(self): return self.cg_version def get_vindex(self): return self.vindex def __repr__(self): return str(self) def __str__(self): return '%s([%s], %s, %s/%s)' % (self.name, ', '.join(map(str,self.args)), self.dim, self.vindex, self.cg_version) #"expression %s/%s" % (self.vindex, self.cg_version) def __getitem__(self, i): return lookup(self, i) def __getslice__(self, i, j): return None def scalar_value(self, recalculate=False): return 0.0 def vec_value(self, recalculate=False): return [] def npvalue(self, recalculate=False): return None def value(self, recalculate=False): return None def forward(self, recalculate=False): return None def set(self, x): pass def batch(self, i): return lookup_batch(self, i) def zero(self): return self def backward(self): pass def __add__(self, other): if isinstance(self, Expression) and isinstance(other, Expression): return _add(self,other) elif isinstance(self, (int,float)) or isinstance(other, (int,float)): return _cadd(self, other) else: raise NotImplementedError('self=%s, other=%s' % (self, other)) def __mul__(self, other): if isinstance(self, Expression) and isinstance(other, Expression): return _mul(self,other) elif isinstance(self, (int,float)) or isinstance(other, (int,float)): return _cmul(self, other) else: raise NotImplementedError('self=%s, other=%s' % (self, other)) def __div__(self, other): if isinstance(self, Expression) and isinstance(other, (int,float)): return _cdiv(self, other) else: raise NotImplementedError() def __neg__(self): return _neg(self) def __sub__(self, other): if isinstance(self,Expression) and isinstance(other,Expression): return self+(-other) elif isinstance(self,(int,float)) and isinstance(other,Expression): return _scalarsub(self, other) elif isinstance(self,Expression) and isinstance(other,(int, float)): return _neg(_scalarsub(other, self)) else: raise NotImplementedError() def init_row(self, i, row): pass def init_from_array(self, *args, **kwargs): pass def set_updated(self, *args, **kwargs): pass def GVExpr(name, args, dim): e = Expression(name, args, dim) graphviz_items.append(e) return e class Model(object): def add_parameters(self, dim, scale=0, *args, **kwargs): assert(isinstance(dim,(tuple,int))) pp = Expression('parameters', [dim], make_dim(dim)) return pp def add_lookup_parameters(self, dim, *args, **kwargs): assert(isinstance(dim, tuple)) pp = Expression('lookup_parameters', [dim], make_dim(dim[1])) return pp def save_all(self, fname): pass def load_all(self, fname): pass def save(self, fname): pass def load(self, fname): pass SECRET = 923148 #_cg = ComputationGraph(SECRET) def cg_version(): return _cg._cg_version def renew_cg(immediate_compute=False, check_validity=False): return _cg.renew(immediate_compute, check_validity) def cg(): global _cg return _cg class ComputationGraph(object): def __init__(self, guard=0): if guard != SECRET: raise RuntimeError("Do not instantiate ComputationGraph directly. 
Use pydynet.cg()") self._cg_version = 0 def renew(self, immediate_compute=False, check_validity=False): vindex_count = -1 del graphviz_items[:] return self def version(self): return self._cg_version def parameters(self, params): graphviz_items.append(params) return params def forward_scalar(self): return 0.0 def inc_forward_scalar(self): return 0.0 def forward_vec(self): return [] def inc_forward_vec(self): return [] def forward(self): return None def inc_forward(self): return None def backward(self): return None _cg = ComputationGraph(SECRET) # }}} def parameter(p): graphviz_items.append(p) return p def scalarInput(s): return GVExpr('scalarInput', [s], make_dim(1, inferred=True)) def vecInput(dim): return GVExpr('vecInput', [dim], make_dim(dim)) def inputVector(v): return GVExpr('inputVector', [v], make_dim(len(v), inferred=True)) def matInput(d1, d2): return GVExpr('matInput', [d1, d2], make_dim(d1, d2)) def inputMatrix(v, d): return GVExpr('inputMatrix', [v, d], make_dim(d, inferred=True)) def lookup(p, index=0, update=True): return GVExpr('lookup', [p, index, update], p.dim) def lookup_batch(p, indices, update=True): return GVExpr('lookup_batch', [p, indices, update], p.dim) def pick(a, index=0): return GVExpr('pick', [a, index], make_dim(1, inferred=True)) def pick_batch(a, indices): return GVExpr('pick_batch', [a, indices], make_dim(len(indices), inferred=True)) def hinge(x, index, m=1.0): return GVExpr('hinge', [x, index, m], copy_dim(x)) def nobackprop(x): return GVExpr('nobackprop', [x], copy_dim(x)) def flip_gradient(x): return GVExpr('flip_gradient', [x], copy_dim(x)) # binary-exp def cdiv(x, y): return GVExpr('cdiv', [x,y], ensure_same_dim(x,y)) def colwise_add(x, y): if x.dim.invalid() or y.dim.invalid(): d = InvalidDim elif x.dim[0] == y.dim[0] and y.dim[1] == 1: d = copy_dim(x) else: d = InvalidConcreteDim(x.dim, y.dim) return GVExpr('colwise_add', [x,y], d) def trace_of_product(x, y): return GVExpr('trace_of_product', [x,y], ensure_same_dim(x,y)) def cmult(x, y): return GVExpr('cmult', [x,y], ensure_same_dim(x,y)) def dot_product(x, y): return GVExpr('dot_product', [x,y], ensure_same_dim(x,y)) def squared_distance(x, y): return GVExpr('squared_distance', [x,y], ensure_same_dim(x,y)) def l1_distance(x, y): return GVExpr('l1_distance', [x,y], ensure_same_dim(x,y)) def binary_log_loss(x, y): return GVExpr('binary_log_loss', [x,y], ensure_same_dim(x,y)) def conv1d_narrow(x, y): if x.dim.invalid() or y.dim.invalid(): d = InvalidDim elif x.dim[0] != y.dim[0]: d = InvalidConcreteDim(x.dim, y.dim) else: d = make_dim(x.dim[0], x.dim[1] - y.dim[1] + 1) return GVExpr('conv1d_narrow', [x,y], d) def conv1d_wide(x, y): if x.dim.invalid() or y.dim.invalid(): d = InvalidDim elif x.dim[0] != y.dim[0]: d = InvalidConcreteDim(x.dim, y.dim) else: d = make_dim(x.dim[0], x.dim[1] + y.dim[1] - 1) return GVExpr('conv1d_wide', [x,y], d) def filter1d_narrow(x, y): if x.dim.invalid() or y.dim.invalid(): d = InvalidDim elif x.dim[0] != y.dim[0]: d = InvalidConcreteDim(x.dim, y.dim) else: d = make_dim(x.dim[0], x.dim[1] - y.dim[1] + 1) return GVExpr('filter1d_narrow', [x,y], d) # unary-exp def tanh(x): return GVExpr('tanh', [x], copy_dim(x)) def exp(x): return GVExpr('exp', [x], copy_dim(x)) def square(x): return GVExpr('square', [x], copy_dim(x)) def sqrt(x): return GVExpr('sqrt', [x], copy_dim(x)) def erf(x): return GVExpr('erf', [x], copy_dim(x)) def cube(x): return GVExpr('cube', [x], copy_dim(x)) def log(x): return GVExpr('log', [x], copy_dim(x)) def lgamma(x): return GVExpr('lgamma', [x], 
copy_dim(x)) def logistic(x): return GVExpr('logistic', [x], copy_dim(x)) def rectify(x): return GVExpr('rectify', [x], copy_dim(x)) def log_softmax(x, restrict=None): return GVExpr('log_softmax', [x,restrict], copy_dim(x)) def softmax(x): return GVExpr('softmax', [x], copy_dim(x)) def softsign(x): return GVExpr('softsign', [x], copy_dim(x)) def pow(x, y): return GVExpr('pow', [x,y], ensure_same_dim(x,y)) def bmin(x, y): return GVExpr('bmin', [x,y], ensure_same_dim(x,y)) def bmax(x, y): return GVExpr('bmax', [x,y], ensure_same_dim(x,y)) def transpose(x): return GVExpr('transpose', [x], make_dim(x.dim[1], x.dim[0]) if x.dim.isvalid() else InvalidDim) def sum_cols(x): return GVExpr('sum_cols', [x], make_dim(x.dim[0],1) if x.dim.isvalid() else InvalidDim) def sum_batches(x): return GVExpr('sum_batches', [x], copy_dim(x)) #expr-opt def fold_rows(x, nrows=2): if x.dim.invalid(): d = InvalidDim elif x.dim[0] != nrows: d = InvalidConcreteDim(x.dim, nrows) else: d = make_dim(1, x.dim[1]) return GVExpr('fold_rows', [x,nrows], d) def pairwise_rank_loss(x, y, m=1.0): return GVExpr('pairwise_rank_loss', [x,y,m], ensure_same_dim(x,y)) def poisson_loss(x, y): return GVExpr('poisson_loss', [x,y], copy_dim(x)) def huber_distance(x, y, c=1.345): return GVExpr('huber_distance', [x,y,c], ensure_same_dim(x,y)) #expr-unsigned def kmax_pooling(x, k): return GVExpr('kmax_pooling', [x,k], make_dim(x.dim[0], k) if x.dim.isvalid() else InvalidDim) def pickneglogsoftmax(x, v): return GVExpr('pickneglogsoftmax', [x,v], make_dim(1, inferred=True)) def pickneglogsoftmax_batch(x, vs): return GVExpr('pickneglogsoftmax_batch', [x,vs], make_dim(len(vs), inferred=True)) def kmh_ngram(x, n): return GVExpr('kmh_ngram', [x,n], make_dim(x.dim[0], x.dim[1]-n+1) if x.dim.isvalid() else InvalidDim) def pickrange(x, v, u): return GVExpr('pickrange', [x,v,u], make_dim(u-v, x.dim[1]) if x.dim.isvalid() else InvalidDim) #expr-float def noise(x, stddev): return GVExpr('noise', [x,stddev], copy_dim(x)) def dropout(x, p): return GVExpr('dropout', [x,p], copy_dim(x)) def block_dropout(x, p): return GVExpr('block_dropout', [x,p], copy_dim(x)) #expr-dim def reshape(x, d): return GVExpr('reshape', [x,d], make_dim(d)) def esum(xs): return GVExpr('esum', xs, ensure_all_same_dim(xs)) def average(xs): return GVExpr('average', xs, ensure_all_same_dim(xs)) def emax(xs): return GVExpr('emax', xs, ensure_all_same_dim(xs)) def concatenate_cols(xs): if any(x.dim.invalid() for x in xs): dim = InvalidDim else: nrows = xs[0].dim[0] ncols = xs[0].dim[1] for x in xs: ncols += x.dim[1] nrows = nrows if nrows == x.dim[0] else -1 dim = make_dim(nrows, ncols) if nrows >= 0 else InvalidDim return GVExpr('concatenate_cols', xs, dim) def concatenate(xs): if any(x.dim.invalid() for x in xs): dim = InvalidDim else: nrows = xs[0].dim[0] ncols = xs[0].dim[1] for x in xs[1:]: nrows += x.dim[0] ncols = ncols if ncols == x.dim[1] else -1 dim = make_dim(nrows, ncols) if ncols >= 0 else InvalidDim return GVExpr('concatenate', xs, dim) def affine_transform(xs): if any(x.dim.invalid() for x in xs): dim = InvalidDim elif all(ensure_mul_dim(a,b)==xs[0].dim for a,b in zip(xs[1::2],xs[2::2])): dim = xs[0].dim else: dim = InvalidDim return GVExpr('affine_transform', xs, dim) builder_num = -1 def new_builder_num(): global builder_num builder_num += 1 return builder_num class _RNNBuilder(object): def set_dropout(self, f): pass def disable_dropout(self): pass def new_graph(self): self.cg_version = _cg.version() self.builder_version = new_builder_num() def start_new_sequence(self, 
es=None): if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.") def add_input(self, e): ensure_freshness(e) if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.") return Expression.from_cexpr(self.cg_version, self.thisptr.add_input(e.c())) def add_input_to_prev(self, prev, e): ensure_freshness(e) if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.") return Expression.from_cexpr(self.cg_version, self.thisptr.add_input(prev, e.c())) def rewind_one_step(self): if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.") self.thisptr.rewind_one_step() def back(self): if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.") return Expression.from_cexpr(self.cg_version, self.thisptr.back()) def final_h(self): if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.") res = [] #def CExpression cexp cexps = self.thisptr.final_h() for cexp in cexps: res.append(Expression.from_cexpr(self.cg_version, cexp)) return res def final_s(self): if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.") res = [] #def CExpression cexp cexps = self.thisptr.final_s() for cexp in cexps: res.append(Expression.from_cexpr(self.cg_version, cexp)) return res def get_h(self, i): if self.cg_version != _cg.version(): raise ValueError("Using stale builder. Create .new_graph() after computation graph is renewed.") res = [] #def CExpression cexp cexps = self.thisptr.get_h(i) for cexp in cexps: res.append(Expression.from_cexpr(self.cg_version, cexp)) return res def get_s(self, i): if self.cg_version != _cg.version(): raise ValueError("Using stale builder. 
Create .new_graph() after computation graph is renewed.") res = [] #def CExpression cexp cexps = self.thisptr.get_s(i) for cexp in cexps: res.append(Expression.from_cexpr(self.cg_version, cexp)) return res def initial_state(self,vecs=None): if self._init_state is None or self.cg_version != _cg.version(): self.new_graph() if vecs is not None: self.start_new_sequence(vecs) else: self.start_new_sequence() self._init_state = RNNState(self, -1) return self._init_state def initial_state_from_raw_vectors(self,vecs=None): if self._init_state is None or self.cg_version != _cg.version(): self.new_graph() if vecs is not None: es = [] for v in vecs: e = vecInput(len(v)) e.set(v) es.append(e) self.start_new_sequence(es) else: self.start_new_sequence() self._init_state = RNNState(self, -1) return self._init_state class SimpleRNNBuilder(_RNNBuilder): def __init__(self, layers, input_dim, hidden_dim, model): self.cg_version = -1 self.layers = layers self.input_dim = input_dim self.hidden_dim = hidden_dim self.model = model self._init_state = None self.builder_version = new_builder_num() def whoami(self): return "SimpleRNNBuilder" class GRUBuilder(_RNNBuilder): def __init__(self, layers, input_dim, hidden_dim, model): self.cg_version = -1 self.layers = layers self.input_dim = input_dim self.hidden_dim = hidden_dim self.model = model self._init_state = None self.builder_version = new_builder_num() def whoami(self): return "GRUBuilder" class LSTMBuilder(_RNNBuilder): def __init__(self, layers, input_dim, hidden_dim, model): self.cg_version = -1 self.layers = layers self.input_dim = input_dim self.hidden_dim = hidden_dim self.model = model self._init_state = None self.builder_version = new_builder_num() def whoami(self): return "LSTMBuilder" class FastLSTMBuilder(_RNNBuilder): def __init__(self, layers, input_dim, hidden_dim, model): self.cg_version = -1 self.layers = layers self.input_dim = input_dim self.hidden_dim = hidden_dim self.model = model self._init_state = None self.builder_version = new_builder_num() def whoami(self): return "FastLSTMBuilder" class BiRNNBuilder(object): """ Builder for BiRNNs that delegates to regular RNNs and wires them together. builder = BiRNNBuilder(1, 128, 100, model, LSTMBuilder) [o1,o2,o3] = builder.transduce([i1,i2,i3]) """ def __init__(self, num_layers, input_dim, hidden_dim, model, rnn_builder_factory): """ @param num_layers: depth of the BiRNN @param input_dim: size of the inputs @param hidden_dim: size of the outputs (and intermediate layer representations) @param model @param rnn_builder_factory: RNNBuilder subclass, e.g. LSTMBuilder """ assert num_layers > 0 assert hidden_dim % 2 == 0 self.builder_layers = [] f = rnn_builder_factory(1, input_dim, hidden_dim/2, model) b = rnn_builder_factory(1, input_dim, hidden_dim/2, model) self.builder_layers.append((f,b)) for _ in xrange(num_layers-1): f = rnn_builder_factory(1, hidden_dim, hidden_dim/2, model) b = rnn_builder_factory(1, hidden_dim, hidden_dim/2, model) self.builder_layers.append((f,b)) def whoami(self): return "BiRNNBuilder" def set_dropout(self, p): for (fb,bb) in self.builder_layers: fb.set_dropout(p) bb.set_dropout(p) def disable_dropout(self): for (fb,bb) in self.builder_layers: fb.disable_dropout() bb.disable_dropout() def add_inputs(self, es): """ returns the list of state pairs (stateF, stateB) obtained by adding inputs to both forward (stateF) and backward (stateB) RNNs. 
@param es: a list of Expression see also transduce(xs) .transduce(xs) is different from .add_inputs(xs) in the following way: .add_inputs(xs) returns a list of RNNState pairs. RNNState objects can be queried in various ways. In particular, they allow access to the previous state, as well as to the state-vectors (h() and s() ) .transduce(xs) returns a list of Expression. These are just the output expressions. For many cases, this suffices. transduce is much more memory efficient than add_inputs. """ for e in es: ensure_freshness(e) for (fb,bb) in self.builder_layers[:-1]: fs = fb.initial_state().transduce(es) bs = bb.initial_state().transduce(reversed(es)) es = [concatenate([f,b]) for f,b in zip(fs, reversed(bs))] (fb,bb) = self.builder_layers[-1] fs = fb.initial_state().add_inputs(es) bs = bb.initial_state().add_inputs(reversed(es)) return [(f,b) for f,b in zip(fs, reversed(bs))] def transduce(self, es): """ returns the list of output Expressions obtained by adding the given inputs to the current state, one by one, to both the forward and backward RNNs, and concatenating. @param es: a list of Expression see also add_inputs(xs) .transduce(xs) is different from .add_inputs(xs) in the following way: .add_inputs(xs) returns a list of RNNState pairs. RNNState objects can be queried in various ways. In particular, they allow access to the previous state, as well as to the state-vectors (h() and s() ) .transduce(xs) returns a list of Expression. These are just the output expressions. For many cases, this suffices. transduce is much more memory efficient than add_inputs. """ for e in es: ensure_freshness(e) for (fb,bb) in self.builder_layers: fs = fb.initial_state().transduce(es) bs = bb.initial_state().transduce(reversed(es)) es = [concatenate([f,b]) for f,b in zip(fs, reversed(bs))] return es class RNNState(object): # {{{ def __init__(self, builder, state_idx=-1, prev_state=None, out=None): self.builder = builder self.state_idx=state_idx self._prev = prev_state self._out = out def add_input(self, x): # x: Expression input_dim = make_dim(self.builder.input_dim) input_dim = x.dim if x.dim==input_dim else InvalidConcreteDim(x.dim, input_dim) rnn_type = self.builder.whoami() if rnn_type.endswith("Builder"): rnn_type = rnn_type[:-len("Builder")] output_e = GVExpr('RNNState', [x, input_dim, rnn_type, self.builder.builder_version, self.state_idx+1], dim=make_dim(self.builder.hidden_dim)) new_state = RNNState(self.builder, self.state_idx+1, self, output_e) return new_state def add_inputs(self, xs): if self._prev is None: self.builder.builder_version = new_builder_num() states = [] cur = self for x in xs: cur = cur.add_input(x) states.append(cur) return states def transduce(self, xs): return [x.output() for x in self.add_inputs(xs)] def output(self): return self._out def prev(self): return self._prev def b(self): return self.builder def get_state_idx(self): return self.state_idx # StackedRNNState TODO: do at least minimal testing for this #{{{ class StackedRNNState(object): #def list states #def StackedRNNState prev def __init__(self, states, prev=None): self.states = states self.prev = prev def add_input(self, x): #def next_states next_states = [] for s in self.states: next_states.append(s.add_input(x)) x = next_states[-1].output() return StackedRNNState(next_states, self) def output(self): return self.states[-1].output() def prev(self): return self.prev def h(self): return [s.h() for s in self.states] def s(self): return [s.s() for s in self.states] def add_inputs(self, xs): """ returns the list of 
states obtained by adding the given inputs to the current state, one by one. """ states = [] cur = self for x in xs: cur = cur.add_input(x) states.append(cur) return states class Trainer(object): def update(self, s=1.0): pass def update_epoch(self, r = 1.0): pass def status(self): pass def set_clip_threshold(self, thr): pass def get_clip_threshold(self): pass class SimpleSGDTrainer(Trainer): """ This object is very cool! """ def __init__(self, m, e0 = 0.1, *args): pass class MomentumSGDTrainer(Trainer): def __init__(self, m, e0 = 0.01, mom = 0.9, *args): pass class AdagradTrainer(Trainer): def __init__(self, m, e0 = 0.1, eps = 1e-20, *args): pass class AdadeltaTrainer(Trainer): def __init__(self, m, eps = 1e-6, rho = 0.95, *args): pass class AdamTrainer(Trainer): def __init__(self, m, alpha = 0.001, beta_1 = 0.9, beta_2 = 0.999, eps = 1e-8, *args ): pass class Initializer(object): pass class NormalInitializer(Initializer): def __init__(self, mean=0, var=1): pass class UniformInitializer(Initializer): def __init__(self, scale): pass class ConstInitializer(Initializer): def __init__(self, c): pass class GlorotInitializer(Initializer): def __init__(self, is_lookup=False): pass class FromFileInitializer(Initializer): def __init__(self, fname): pass class NumpyInitializer(Initializer): def __init__(self, array): pass def shape_str(e_dim): if e_dim.invalid(): #return '{??}' return str(e_dim) elif e_dim.inferred: if e_dim[1] == 1: return '{%s}' % (e_dim[0]) else: return '{%s,%s}' % (e_dim[0],e_dim[1]) else: if e_dim[1] == 1: return '{{%s}}' % (e_dim[0]) else: return '{{%s,%s}}' % (e_dim[0],e_dim[1]) class GVNode(object): def __init__(self, name, input_dim, label, output_dim, children, features, node_type, expr_name): self.name = name self.input_dim = input_dim self.label = label self.output_dim = output_dim self.children = children self.features = features self.node_type = node_type self.expr_name = expr_name def __iter__(self): return iter([self.name, self.input_dim, self.label, self.output_dim, self.children, self.features, self.node_type, self.expr_name]) def __repr__(self): return 'GVNode(%s)' % ', '.join(map(str, self)) def __str__(self): return repr(self) def __lt__(self, other): return id(self) < id(other) def make_network_graph(compact, expression_names, lookup_names): """ Make a network graph, represented as of nodes and a set of edges. 
The nodes are represented as tuples: (name: string, input_dim: Dim, label: string, output_dim: Dim, children: set[name], features: string) # The edges are represented as dict of children to sets of parents: (child: string) -> [(parent: string, features: string)] """ nodes = set() # edges = defaultdict(set) # parent -> (child, extra) var_name_dict = dict() if expression_names: for e in graphviz_items: # e: Expression if e in expression_names: var_name_dict[e.vindex] = expression_names[e] rnn_bldr_name = defaultdict(lambda: chr(len(rnn_bldr_name)+ord('A'))) def vidx2str(vidx): return '%s%s' % ('N', vidx) for e in graphviz_items: # e: Expression vidx = e.vindex f_name = e.name args = e.args output_dim = e.dim input_dim = None # basically just RNNStates use this since everything else has input_dim==output_dim children = set() node_type = '2_regular' if f_name == 'vecInput': [_dim] = args arg_strs = [] elif f_name == 'inputVector': [_v] = args arg_strs = [] elif f_name == 'matInput': [_d1, _d2] = args arg_strs = [] elif f_name == 'inputMatrix': [_v, _d] = args arg_strs = [] elif f_name == 'parameters': [_dim] = args arg_strs = [] if compact: if vidx in var_name_dict: f_name = var_name_dict[vidx] node_type = '1_param' elif f_name == 'lookup_parameters': [_dim] = args arg_strs = [] if compact: if vidx in var_name_dict: f_name = var_name_dict[vidx] node_type = '1_param' elif f_name == 'lookup': [p, idx, update] = args [_dim] = p.args if vidx in var_name_dict: name = var_name_dict[vidx] else: name = None item_name = None if lookup_names and p in expression_names: param_name = expression_names[p] if param_name in lookup_names: item_name = '\\"%s\\"' % (lookup_names[param_name][idx],) if compact: if item_name is not None: f_name = item_name elif name is not None: f_name = '%s[%s]' % (name, idx) else: f_name = 'lookup(%s)' % (idx) arg_strs = [] else: arg_strs = [var_name_dict.get(p.vindex, 'v%d' % (p.vindex))] if item_name is not None: arg_strs.append(item_name) vocab_size = _dim[0] arg_strs.extend(['%s' % (idx), '%s' % (vocab_size), 'update' if update else 'fixed']) #children.add(vidx2str(p.vindex)) #node_type = '1_param' elif f_name == 'RNNState': [arg, input_dim, bldr_type, bldr_num, state_idx] = args # arg==input_e rnn_name = rnn_bldr_name[bldr_num] if bldr_type.endswith('Builder'): bldr_type[:-len('Builder')] f_name = '%s-%s-%s' % (bldr_type, rnn_name, state_idx) if not compact: i = arg.vindex s = var_name_dict.get(i, 'v%d' % (i)) arg_strs = [s] else: arg_strs = [] children.add(vidx2str(arg.vindex)) node_type = '3_rnn_state' else: arg_strs = [] for arg in args: if isinstance(arg, Expression): if not compact: i = arg.vindex s = var_name_dict.get(i, 'v%d' % (i)) arg_strs.append(s) children.add(vidx2str(arg.vindex)) elif isinstance(arg, float) and compact: s = re.sub('0+$', '', '%.3f' % (arg)) if s == '0.': s = str(arg) arg_strs.append(s) else: arg_strs.append(str(arg)) # f_name = { , # }.get(f_name, f_name) if compact: f_name = { 'add': '+', 'sub': '-', 'mul': '*', 'div': '/', 'cadd': '+', 'cmul': '*', 'cdiv': '/', 'scalarsub': '-', 'concatenate': 'cat', 'esum': 'sum', 'emax': 'max', 'emin': 'min', }.get(f_name, f_name) if arg_strs: str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs)) else: str_repr = f_name elif f_name == 'add': [a,b] = arg_strs str_repr = '%s + %s' % (a,b) elif f_name == 'sub': [a,b] = arg_strs str_repr = '%s - %s' % (a,b) elif f_name == 'mul': [a,b] = arg_strs str_repr = '%s * %s' % (a,b) elif f_name == 'div': [a,b] = arg_strs str_repr = '%s / %s' % (a,b) elif f_name == 'neg': 
[a,] = arg_strs str_repr = '-%s' % (a) elif f_name == 'affine_transform': str_repr = arg_strs[0] for i in xrange(1, len(arg_strs), 2): str_repr += ' + %s*%s' % tuple(arg_strs[i:i+2]) else: if arg_strs is not None: str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs)) else: str_repr = f_name name = vidx2str(vidx) var_name = '%s' % (var_name_dict.get(vidx, 'v%d' % (vidx))) if not compact else '' # if show_dims: # str_repr = '%s\\n%s' % (shape_str(e.dim), str_repr) label = str_repr if not compact: label = '%s = %s' % (var_name, label) features = '' # if output_dim.invalid(): # features += " [color=red,style=filled,fillcolor=red]" # node_def_lines.append(' %s [label="%s%s"] %s;' % (vidx2str(vidx), label_prefix, str_repr, '')) expr_name = expression_names[e] if compact and expression_names and (e in expression_names) and (expression_names[e] != f_name) else None nodes.add(GVNode(name, input_dim, label, output_dim, frozenset(children), features, node_type, expr_name)) return nodes def parents_of(n, nodes): ps = [] for n in nodes: for c in n.children: if n in c.children: ps.append return ps def collapse_birnn_states(nodes, compact): node_info = {n.name:n for n in nodes} new_nodes = [] children_forwards = dict() # if `n.children` is pointing to K, return V instead rnn_state_nodes = [] rnn_parents = defaultdict(set) # rnn_state_node -> [parent_expression] rnn_children = {} # rnn_state_node -> [child_expression] shared_rnn_states = defaultdict(set) # (input name, output name) -> [(rnn state name)] rnn_groups = dict() # these nodes (keys) are being replaced by the new group nodes (values) nodes_to_delete = set() for n in nodes: for c in n.children: if node_info[c].node_type == '3_rnn_state': rnn_parents[node_info[c].name].add(n.name) if n.node_type == '3_rnn_state': rnn_state_nodes.append(n) rnn_children[n.name] = set(node_info[c].name for c in n.children) for n in rnn_state_nodes: in_e, = rnn_children[n.name] out_e, = rnn_parents[n.name] shared_rnn_states[(in_e, out_e)].add(n) for ((in_e, out_e), ns) in shared_rnn_states.iteritems(): input_dims = set(n.input_dim for n in ns) output_dims = set(n.output_dim for n in ns) if len(ns) > 1 and len(input_dims)==1 and len(output_dims)==1: input_dim, = input_dims output_dim, = output_dims new_rnn_group_state_name = ''.join(n.name for n in sorted(ns)) new_rnn_group_state_label = '\\n'.join(n.label for n in sorted(ns)) if not compact: new_rnn_group_state_label = '%s\\n%s' % (node_info[out_e].label, new_rnn_group_state_label) cat_output_dim = make_dim(output_dim[0]*2, output_dim[1]) new_rnn_group_state = GVNode(new_rnn_group_state_name, input_dim, new_rnn_group_state_label, cat_output_dim, frozenset([in_e]), '', '3_rnn_state', node_info[out_e].expr_name) for n in ns: rnn_groups[n.name] = new_rnn_group_state.name # children_forwards[n.name] = new_rnn_group_state.name nodes_to_delete.add(n.name) children_forwards[out_e] = new_rnn_group_state.name nodes.add(new_rnn_group_state) nodes_to_delete.add(out_e) # TODO: WHEN WE DELETE A CAT NODE, MAKE SURE WE FORWARD TO THE **NEW GROUP STATE NODE** for (name, input_dim, label, output_dim, children, features, node_type, expr_name) in nodes: if name not in nodes_to_delete: new_children = [] for c in children: while c in children_forwards: c = children_forwards[c] new_children.append(c) new_nodes.append(GVNode(name, input_dim, label, output_dim, new_children, features, node_type, expr_name)) return (new_nodes, rnn_groups) def print_graphviz(compact=False, show_dims=True, expression_names=None, lookup_names=None, 
collapse_birnns=False): original_nodes = make_network_graph(compact, expression_names, lookup_names) nodes = original_nodes collapse_to = dict() if collapse_birnns: (nodes, birnn_collapse_to) = collapse_birnn_states(nodes, compact) collapse_to.update(birnn_collapse_to) print('digraph G {') print(' rankdir=BT;') if not compact: print(' nodesep=.05;') node_types = defaultdict(set) for n in nodes: node_types[n.node_type].add(n.name) for node_type in sorted(node_types): style = { '1_param': '[shape=ellipse]', '2_regular': '[shape=rect]', '3_rnn_state': '[shape=rect, peripheries=2]', }[node_type] print(' node %s; ' % (style), ' '.join(node_types[node_type])) # all_nodes = set(line.strip().split()[0] for line in node_def_lines) for n in nodes: label = n.label if show_dims: if n.expr_name is not None: label = '%s\\n%s' % (n.expr_name, label) label = '%s\\n%s' % (shape_str(n.output_dim), label) if n.input_dim is not None: label = '%s\\n%s' % (label, shape_str(n.input_dim)) if n.output_dim.invalid() or (n.input_dim is not None and n.input_dim.invalid()): n.features += " [color=red,style=filled,fillcolor=red]" print(' %s [label="%s"] %s;' % (n.name, label, n.features)) for c in n.children: print(' %s -> %s;' % (c, n.name)) rnn_states = [] # (name, rnn_name, state_idx) rnn_state_re = re.compile("[^-]+-(.)-(\\d+)") for n in original_nodes: if n.node_type == '3_rnn_state': m = rnn_state_re.search(n.label) assert m is not None, 'rnn_state_re.search(%s); %s' % (n.label, n) (rnn_name, state_idx) = m.groups() rnn_states.append((rnn_name, int(state_idx), n.name)) rnn_states = sorted(rnn_states) edges = set() for ((rnn_name_p, state_idx_p, name_p), (rnn_name_n, state_idx_n, name_n)) in zip(rnn_states,rnn_states[1:]): if rnn_name_p == rnn_name_n: if state_idx_p+1 == state_idx_n: group_name_p = collapse_to.get(name_p, name_p) group_name_n = collapse_to.get(name_n, name_n) edges.add((group_name_p, group_name_n)) for (name_p, name_n) in edges: print(' %s -> %s [style=dotted];' % (name_p, name_n)) # ,dir=both print('}')
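# Usage sketch for the mock expression API above: build a tiny computation
# graph and emit its graphviz description. Everything referenced here (Model,
# parameter, vecInput, softmax, print_graphviz) is defined in this module and
# assumed to be in scope; the shapes are arbitrary illustration values.

m = Model()
W = parameter(m.add_parameters((3, 5)))   # 3x5 weight matrix
b = parameter(m.add_parameters(3))        # bias vector, dim (3, 1)
x = vecInput(5)                           # 5-dimensional input placeholder
y = softmax(W * x + b)                    # records mul/add/softmax nodes with inferred dims

# Emits a 'digraph G { ... }' description of the recorded graph to stdout.
print_graphviz(compact=True, show_dims=True,
               expression_names={W: 'W', b: 'b', x: 'x'})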
# -*- coding: utf-8 -*- """Test suite for printing. """ # Standard library imports import importlib.resources as ir import io import shutil import struct import unittest import uuid import warnings # Third party library imports ... import lxml.etree # Local imports import glymur from glymur import Jp2k from glymur.jp2box import UUIDBox from . import fixtures, data TIFF_ASCII = 2 TIFF_SHORT = 3 TIFF_LONG = 4 TIFF_RATIONAL = 5 TIFF_DOUBLE = 12 SUBFILETYPE = 254 FILETYPE_REDUCEDIMAGE = 0x1 OSUBFILETYPE = 255 IMAGEWIDTH = 256 IMAGELENGTH = 257 BITSPERSAMPLE = 258 COMPRESSION = 259 COMPRESSION_NONE = 1 PHOTOMETRIC = 262 STRIPOFFSETS = 273 ORIENTATION = 274 PHOTOMETRIC_MINISBLACK = 1 SAMPLESPERPIXEL = 277 ROWSPERSTRIP = 278 STRIPBYTECOUNTS = 279 MINSAMPLEVALUE = 280 MAXSAMPLEVALUE = 281 XRESOLUTION = 282 YRESOLUTION = 283 PLANARCONFIG = 284 MODELPIXELSCALE = 33550 MODELTIEPOINT = 33922 GEOKEYDIRECTORY = 34735 GEOASCIIPARAMS = 34737 class TestSuite(fixtures.TestCommon): """Tests for XMP, Exif UUIDs.""" def _create_degenerate_geotiff(self, e): """ Create an in-memory degenerate geotiff. Parameters ---------- e : str Either '<' for little endian or '>' for big endian. Returns ------- bytes sequence of bytes making up a degenerate geotiff. Should have something like the following structure: Magic: 0x4949 <little-endian> Version: 0x2a <ClassicTIFF> Directory 0: offset 8 (0x8) next 0 (0) SubFileType (254) LONG (4) 1<1> ImageWidth (256) SHORT (3) 1<1> ImageLength (257) SHORT (3) 1<1> BitsPerSample (258) SHORT (3) 1<8> Compression (259) SHORT (3) 1<1> Photometric (262) SHORT (3) 1<1> StripOffsets (273) LONG (4) 1<1> SamplesPerPixel (277) SHORT (3) 1<1> RowsPerStrip (278) LONG (4) 1<1> StripByteCounts (279) LONG (4) 1<1> XResolution (282) RATIONAL (5) 1<75> YResolution (283) RATIONAL (5) 1<75> 33550 (0x830e) DOUBLE (12) 3<10 10 0> 33922 (0x8482) DOUBLE (12) 6<0 0 0 444650 4.64051e+06 0> 34735 (0x87af) SHORT (3) 24<1 1 0 5 1024 0 1 1 1025 0 1 1 ...> 34737 (0x87b1) ASCII (2) 45<UTM Zone 16N NAD27"|Clar ...> """ b = io.BytesIO() # Create the header. # Signature, version, offset to IFD if e == '<': buffer = struct.pack('<2sHI', b'II', 42, 8) else: buffer = struct.pack('>2sHI', b'MM', 42, 8) b.write(buffer) offset = b.tell() num_tags = 16 # The CDATA offset is past IFD tag count offset += 2 # The CDATA offset is past the IFD offset += num_tags * 12 # The CDATA offset is past the null offset to next IFD offset += 4 # The CDATA offset is past the image data offset += 1 # Write the tag count buffer = struct.pack(e + 'H', num_tags) b.write(buffer) # Write out all the IFD tags. Any data that exceeds 4 bytes has to # be appended later. 
lst = [ struct.pack(e + 'HHII', SUBFILETYPE, TIFF_LONG, 1, 1), struct.pack(e + 'HHII', IMAGEWIDTH, TIFF_SHORT, 1, 1), struct.pack(e + 'HHII', IMAGELENGTH, TIFF_SHORT, 1, 1), struct.pack(e + 'HHII', BITSPERSAMPLE, TIFF_SHORT, 1, 8), struct.pack(e + 'HHII', COMPRESSION, TIFF_SHORT, 1, COMPRESSION_NONE), struct.pack(e + 'HHII', PHOTOMETRIC, TIFF_SHORT, 1, 1), struct.pack(e + 'HHII', STRIPOFFSETS, TIFF_LONG, 1, 1), struct.pack(e + 'HHII', SAMPLESPERPIXEL, TIFF_SHORT, 1, 1), struct.pack(e + 'HHII', ROWSPERSTRIP, TIFF_LONG, 1, 1), struct.pack(e + 'HHII', STRIPBYTECOUNTS, TIFF_LONG, 1, 1), struct.pack(e + 'HHII', XRESOLUTION, TIFF_RATIONAL, 1, offset), struct.pack(e + 'HHII', YRESOLUTION, TIFF_RATIONAL, 1, offset + 8), struct.pack(e + 'HHII', MODELPIXELSCALE, TIFF_DOUBLE, 3, offset + 16), struct.pack(e + 'HHII', MODELTIEPOINT, TIFF_DOUBLE, 6, offset + 40), struct.pack(e + 'HHII', GEOKEYDIRECTORY, TIFF_SHORT, 24, offset + 88), struct.pack(e + 'HHII', GEOASCIIPARAMS, TIFF_ASCII, 45, offset + 136), ] for buffer in lst: b.write(buffer) # NULL pointer to next IFD buffer = struct.pack(e + 'I', 0) b.write(buffer) # Image data. Just a single byte will do. buffer = struct.pack(e + 'B', 0) b.write(buffer) # Now append the tag payloads that did not fit into the IFD. # XResolution tag_payloads = [ (e + 'I', 75), # XResolution (e + 'I', 1), (e + 'I', 75), # YResolution (e + 'I', 1), (e + 'd', 10), # Model pixel scale tag (e + 'd', 10), (e + 'd', 0), ] # MODELTIEPOINT datums = [0.0, 0.0, 0.0, 44650.0, 4640510.0, 0.0] for datum in datums: tag_payloads.append((e + 'd', datum)) # GeoKeyDirectory datums = [ 1, 1, 0, 5, 1024, 0, 1, 1, 1025, 0, 1, 1, 1026, 34737, 20, 0, 2049, 34737, 24, 20, 3072, 0, 1, 26716, ] for datum in datums: tag_payloads.append((e + 'H', datum)) # GEOASCIIPARAMS items = (e + '45s', b'UTM Zone 16N NAD27"|Clarke, 1866 by Default| ') tag_payloads.append(items) # Tag payloads for format, datum in tag_payloads: buffer = struct.pack(format, datum) b.write(buffer) b.seek(0) return b.read() def test__printing__geotiff_uuid__xml_sidecar(self): """ SCENARIO: Print a geotiff UUID with XML sidecar file EXPECTED RESULT: Should not error out. """ box_data = ir.read_binary('tests.data', '0220000800_uuid.dat') bf = io.BytesIO(box_data) bf.seek(8) box = UUIDBox.parse(bf, 0, 703) str(box) def test_append_xmp_uuid(self): """ SCENARIO: Append an XMP UUID box to an existing JP2 file. EXPECTED RESULT: The new last box in the JP2 file is UUID. """ the_uuid = uuid.UUID('be7acfcb-97a9-42e8-9c71-999491e3afac') raw_data = fixtures.SIMPLE_RDF.encode('utf-8') shutil.copyfile(self.jp2file, self.temp_jp2_filename) jp2 = Jp2k(self.temp_jp2_filename) ubox = glymur.jp2box.UUIDBox(the_uuid=the_uuid, raw_data=raw_data) jp2.append(ubox) # Should be two UUID boxes now. expected_ids = ['jP ', 'ftyp', 'jp2h', 'uuid', 'jp2c', 'uuid'] actual_ids = [b.box_id for b in jp2.box] self.assertEqual(actual_ids, expected_ids) # The data should be an XMP packet, which gets interpreted as # an ElementTree. self.assertTrue(isinstance(jp2.box[-1].data, lxml.etree._ElementTree)) def test_bad_exif_tag(self): """ Corrupt the Exif IFD with an invalid tag should produce a warning. """ b = self._create_exif_uuid('<') b.seek(0) buffer = b.read() # The first tag should begin at byte 32. Replace the entire IDF # entry with zeros. 
tag = struct.pack('<HHII', 0, 3, 0, 0) buffer = buffer[:40] + tag + buffer[52:] b = io.BytesIO() b.write(buffer) b.seek(8) with self.assertWarns(UserWarning): box = glymur.jp2box.UUIDBox.parse(b, 0, 418) self.assertEqual(box.box_id, 'uuid') # Should still get the IFD. 16 tags. self.assertEqual(len(box.data.keys()), 16) def test_exif(self): """ Verify read of both big and little endian Exif IFDs. """ # Check both little and big endian. for endian in ['<', '>']: self._test_endian_exif(endian) def _create_exif_uuid(self, endian): """ Create a buffer that can be parsed as an Exif UUID. Parameters ---------- endian : str Either '<' for little endian or '>' for big endian """ b = io.BytesIO() # Write L, T, UUID identifier. # 388 = length of degenerate tiff # 6 = Exif\x0\x0 # 16 = length of UUID identifier # 8 = length of L, T # 388 + 6 + 16 + 8 = 418 b.write(struct.pack('>I4s', 418, b'uuid')) b.write(b'JpgTiffExif->JP2') b.write(b'Exif\x00\x00') buffer = self._create_degenerate_geotiff(endian) b.write(buffer) b.seek(8) return b def _test_endian_exif(self, endian): """ Test Exif IFDs. Parameters ---------- endian : str Either '<' for little endian or '>' for big endian """ bptr = self._create_exif_uuid(endian) box = glymur.jp2box.UUIDBox.parse(bptr, 0, 418) self.assertEqual(box.data['XResolution'], 75) expected = 'UTM Zone 16N NAD27"|Clarke, 1866 by Default| ' self.assertEqual(box.data['GeoAsciiParams'], expected) @unittest.skip('not sure why this was corrupt') def test_print_bad_geotiff(self): """ SCENARIO: A GeoTIFF UUID is corrupt. EXPECTED RESULT: The string representation should validate and clearly state that the UUID box is corrupt. """ with ir.path(data, 'issue398.dat') as path: with path.open('rb') as f: f.seek(8) with warnings.catch_warnings(): # Ignore the warnings about invalid TIFF tags, we already # know that. warnings.simplefilter('ignore') box = glymur.jp2box.UUIDBox.parse(f, 0, 380) actual = str(box) expected = ("UUID Box (uuid) @ (0, 380)\n" " UUID: " "b14bf8bd-083d-4b43-a5ae-8cd7d5a6ce03 (GeoTIFF)\n" " UUID Data: corrupt") self.assertEqual(actual, expected) class TestSuiteHiRISE(fixtures.TestCommon): """Tests for HiRISE RDRs.""" def setUp(self): super(TestSuiteHiRISE, self).setUp() # Hand-create the boxes needed for HiRISE. the_uuid = uuid.UUID('2b0d7e97-aa2e-317d-9a33-e53161a2f7d0') ulst = glymur.jp2box.UUIDListBox([the_uuid]) version = 0 flag = [0, 0, 0] url = 'ESP_032436_1755_COLOR.LBL' debox = glymur.jp2box.DataEntryURLBox(version, flag, url) uuidinfo = glymur.jp2box.UUIDInfoBox([ulst, debox]) uuid_data = ir.read_binary(data, 'degenerate_geotiff.tif') the_uuid = uuid.UUID('b14bf8bd-083d-4b43-a5ae-8cd7d5a6ce03') geotiff_uuid = glymur.jp2box.UUIDBox(the_uuid, uuid_data) # Fabricate a new JP2 file out of the signature, file type, header, # and codestream out of nemo.jp2, but add in the UUIDInfo and UUID # box from HiRISE. 
jp2 = Jp2k(self.jp2file) boxes = [jp2.box[0], jp2.box[1], jp2.box[2], uuidinfo, geotiff_uuid, jp2.box[-1]] self.hirise_jp2file_name = self.test_dir_path / 'hirise.jp2' jp2.wrap(self.hirise_jp2file_name, boxes=boxes) def test_tags(self): jp2 = Jp2k(self.hirise_jp2file_name) self.assertEqual(jp2.box[4].data['GeoDoubleParams'], (0.0, 180.0, 0.0, 0.0, 3396190.0, 3396190.0)) self.assertEqual(jp2.box[4].data['GeoAsciiParams'], 'Equirectangular MARS|GCS_MARS|') self.assertEqual(jp2.box[4].data['GeoKeyDirectory'], ( 1, 1, 0, 18, # noqa 1024, 0, 1, 1, # noqa 1025, 0, 1, 1, # noqa 1026, 34737, 21, 0, # noqa 2048, 0, 1, 32767, # noqa 2049, 34737, 9, 21, # noqa 2050, 0, 1, 32767, # noqa 2054, 0, 1, 9102, # noqa 2056, 0, 1, 32767, # noqa 2057, 34736, 1, 4, # noqa 2058, 34736, 1, 5, # noqa 3072, 0, 1, 32767, # noqa 3074, 0, 1, 32767, # noqa 3075, 0, 1, 17, # noqa 3076, 0, 1, 9001, # noqa 3082, 34736, 1, 2, # noqa 3083, 34736, 1, 3, # noqa 3088, 34736, 1, 1, # noqa 3089, 34736, 1, 0, # noqa )) self.assertEqual(jp2.box[4].data['ModelPixelScale'], (0.25, 0.25, 0.0)) self.assertEqual(jp2.box[4].data['ModelTiePoint'], ( 0.0, 0.0, 0.0, -2523306.125, -268608.875, 0.0 )) @unittest.skipIf(not fixtures._HAVE_GDAL, 'Could not load GDAL') def test_printing_geotiff_uuid(self): """ SCENARIO: Print a geotiff UUID. EXPECTED RESULT: Should match a known geotiff UUID. The string representation validates. """ jp2 = Jp2k(self.hirise_jp2file_name) self.maxDiff = None actual = str(jp2.box[4]) expected = fixtures.GEOTIFF_UUID self.assertEqual(actual, expected)
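

# A minimal, self-contained sketch (not exercised by the tests above) showing
# how the degenerate TIFF built by _create_degenerate_geotiff() can be walked
# with nothing but ``struct``: read the byte-order mark and the magic number
# 42, seek to IFD 0, then iterate over its fixed-size 12-byte entries.  The
# ``io`` and ``struct`` modules are already imported at the top of this file.
def dump_ifd0(tiff_bytes):
    """Yield (tag, dtype, count, value_or_offset) tuples from IFD 0."""
    b = io.BytesIO(tiff_bytes)
    endian = '<' if b.read(2) == b'II' else '>'
    magic, ifd_offset = struct.unpack(endian + 'HI', b.read(6))
    if magic != 42:
        raise ValueError('not a classic TIFF')
    b.seek(ifd_offset)
    num_tags, = struct.unpack(endian + 'H', b.read(2))
    for _ in range(num_tags):
        # tag (H), field type (H), count (I), value or offset (I)
        yield struct.unpack(endian + 'HHII', b.read(12))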
# Copyright (C) 2012-2013 Claudio Guarnieri. # Copyright (C) 2014-2018 Cuckoo Foundation. # This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org # See the file 'docs/LICENSE' for copying permission. import ConfigParser import click import os import logging import re from cuckoo.common.exceptions import CuckooConfigurationError from cuckoo.common.objects import Dictionary from cuckoo.common.utils import parse_bool from cuckoo.misc import cwd log = logging.getLogger(__name__) _cache = {} class Type(object): """Base Class for Type Definitions""" def __init__(self, default=None, required=True, sanitize=False, allow_empty=False): self.required = required self.sanitize = sanitize self.allow_empty = allow_empty self.default = self.parse(default) def parse(self, value): """Parse a raw input value.""" def check(self, value): """Checks the type of the value.""" def emit(self, value): """String-readable version of this object""" class Int(Type): """Integer Type Definition class.""" def parse(self, value): if isinstance(value, (int, long)): return value if isinstance(value, basestring) and value.isdigit(): return int(value) def check(self, value): if self.allow_empty and not value: return True try: click.INT(value) return True except: return False def emit(self, value): return "%d" % value if value is not None else "" class String(Type): """String Type Definition class.""" def parse(self, value): return value.strip() if value else None def check(self, value): if self.allow_empty and not value: return True return isinstance(value, basestring) def emit(self, value): return value or "" class Path(String): """Path Type Definition class.""" def __init__(self, default=None, exists=False, writable=False, readable=False, required=True, allow_empty=False, sanitize=False): self.exists = exists self.writable = writable self.readable = readable super(Path, self).__init__(default, required, sanitize, allow_empty) def parse(self, value): if self.allow_empty and not value: return try: c = click.Path( exists=self.exists, writable=self.writable, readable=self.readable ) return c.convert(value, None, None) except Exception: return value def check(self, value): if self.allow_empty and not value: return True try: c = click.Path( exists=self.exists, writable=self.writable, readable=self.readable ) c.convert(value, None, None) return True except: return False def emit(self, value): return value or "" class Boolean(Type): """Boolean Type Definition class.""" def parse(self, value): try: return parse_bool(value) except: log.error("Incorrect Boolean %s", value) def check(self, value): try: parse_bool(value) return True except: return False def emit(self, value): return "yes" if value else "no" class UUID(Type): """UUID Type Definition class.""" def parse(self, value): try: c = click.UUID(value) return str(c) except: log.error("Incorrect UUID %s", value) def check(self, value): """Checks if the value is of type UUID.""" try: click.UUID(value) return True except: return False def emit(self, value): return value class List(Type): """List Type Definition class.""" def __init__(self, subclass, default, sep=",", strip=True): self.subclass = subclass self.sep = sep self.strip = strip super(List, self).__init__(default) def parse(self, value): if value is None: return [] try: ret = [] if isinstance(value, (tuple, list)): for entry in value: ret.append(self.subclass().parse(entry)) return ret for entry in re.split("[%s]" % self.sep, value): if self.strip: entry = entry.strip() if not entry: continue 
ret.append(self.subclass().parse(entry)) return ret except: log.error("Incorrect list: %s", value) def check(self, value): try: value.split(self.sep) return True except: return False def emit(self, value): return (", " if self.sep[0] == "," else self.sep[0]).join(value or "") class Config(object): """Configuration file parser.""" configuration = { "cuckoo": { "cuckoo": { "version_check": Boolean(True), "delete_original": Boolean(False), "delete_bin_copy": Boolean(False), "machinery": String("virtualbox"), "memory_dump": Boolean(False), "terminate_processes": Boolean(False), "reschedule": Boolean(False), "process_results": Boolean(True), "max_analysis_count": Int(0), "max_machines_count": Int(0), "max_vmstartup_count": Int(10), "freespace": Int(1024), "tmppath": Path( exists=True, writable=True, readable=False, allow_empty=True ), "api_token": String( allow_empty=True, sanitize=True, required=False ), "web_secret": String( allow_empty=True, sanitize=True, required=False ), "rooter": Path( "/tmp/cuckoo-rooter", exists=False, writable=False, readable=False ), }, "feedback": { "enabled": Boolean(False), "name": String(), "company": String(), "email": String(), }, "resultserver": { "ip": String("192.168.56.1"), "port": Int(2042), "force_port": Boolean(False), "upload_max_size": Int(128 * 1024 * 1024), }, "processing": { "analysis_size_limit": Int(128 * 1024 * 1024), "resolve_dns": Boolean(True), "sort_pcap": Boolean(True), }, "database": { "connection": String(sanitize=True), "timeout": Int(60, allow_empty=True), }, "timeouts": { "default": Int(120), "critical": Int(60), "vm_state": Int(60), }, "remotecontrol": { "enabled": Boolean(False), "guacd_host": String("localhost"), "guacd_port": Int(4822), }, }, "virtualbox": { "virtualbox": { "mode": String("headless"), "path": Path( "/usr/bin/VBoxManage", exists=False, writable=False, readable=True ), "interface": String("vboxnet0"), "machines": List(String, "cuckoo1"), "controlports": String("5000-5050", required=False), }, "*": { "__section__": "cuckoo1", "label": String("cuckoo1"), "platform": String("windows"), "ip": String("192.168.56.101"), "snapshot": String(), "interface": String(), "resultserver_ip": String(), "resultserver_port": Int(), "tags": String(), "options": List(String, None, ",\\s"), "osprofile": String(required=False), }, "__star__": ("virtualbox", "machines"), }, "auxiliary": { "sniffer": { "enabled": Boolean(True), "tcpdump": Path( "/usr/sbin/tcpdump", exists=False, writable=False, readable=True ), "bpf": String(), }, "mitm": { "enabled": Boolean(False), "mitmdump": Path( "/usr/local/bin/mitmdump", exists=False, writable=False, readable=True ), "port_base": Int(50000), "script": Path( "stuff/mitm.py", exists=False, writable=False, readable=True ), "certificate": Path( "bin/cert.p12", exists=False, writable=False, readable=True ), }, "replay": { "enabled": Boolean(True, required=False), "mitmdump": Path( "/usr/local/bin/mitmdump", exists=False, writable=False, readable=True, required=False ), "port_base": Int(51000, required=False), "certificate": Path( "bin/cert.p12", exists=False, writable=False, readable=True, required=False ), }, "services": { "enabled": Boolean(False), "services": String("honeyd"), "timeout": Int(0), }, "reboot": { "enabled": Boolean(True), }, }, "avd": { "avd": { "mode": String("headless"), "emulator_path": Path( "/home/cuckoo/android-sdk-linux/tools/emulator", exists=True, writable=False, readable=True ), "adb_path": Path( "/home/cuckoo/android-sdk-linux/platform-tools/adb", exists=True, writable=False, 
readable=True ), "avd_path": Path( "/home/cuckoo/.android/avd", exists=True, writable=False, readable=True ), "reference_machine": String("cuckoo-bird"), "machines": List(String, "cuckoo1"), }, "*": { "__section__": "cuckoo1", "label": String("cuckoo1"), "platform": String("android"), "ip": String("127.0.0.1"), "emulator_port": Int(5554), "resultserver_ip": String("10.0.2.2"), "resultserver_port": Int(2042), "osprofile": String(required=False), }, "__star__": ("avd", "machines"), }, "esx": { "esx": { "dsn": String("esx://127.0.0.1/?no_verify=1"), "username": String("username_goes_here"), "password": String("password_goes_here", sanitize=True), "machines": List(String, "analysis1"), "interface": String("eth0"), }, "*": { "__section__": "analysis1", "label": String("cuckoo1"), "platform": String("windows"), "ip": String("192.168.122.101"), "snapshot": String("clean_snapshot"), "interface": String(), "resultserver_ip": String(), "resultserver_port": Int(), "tags": String(), "osprofile": String(required=False), }, "__star__": ("esx", "machines"), }, "kvm": { "kvm": { "dsn": String("qemu:///system", required=False), "interface": String("virbr0"), "machines": List(String, "cuckoo1"), }, "*": { "__section__": "cuckoo1", "label": String("cuckoo1"), "platform": String("windows"), "ip": String("192.168.122.101"), "snapshot": String(), "interface": String(), "resultserver_ip": String(), "resultserver_port": Int(), "tags": String(), "osprofile": String(required=False), }, "__star__": ("kvm", "machines"), }, "memory": { "basic": { "guest_profile": String("WinXPSP2x86"), "delete_memdump": Boolean(False), }, "malfind": { "enabled": Boolean(True), "filter": Boolean(True), }, "apihooks": { "enabled": Boolean(False), "filter": Boolean(True), }, "pslist": { "enabled": Boolean(True), "filter": Boolean(False), }, "psxview": { "enabled": Boolean(True), "filter": Boolean(False), }, "callbacks": { "enabled": Boolean(True), "filter": Boolean(False), }, "idt": { "enabled": Boolean(True), "filter": Boolean(False), }, "timers": { "enabled": Boolean(True), "filter": Boolean(False), }, "messagehooks": { "enabled": Boolean(False), "filter": Boolean(False), }, "getsids": { "enabled": Boolean(True), "filter": Boolean(False), }, "privs": { "enabled": Boolean(True), "filter": Boolean(False), }, "dlllist": { "enabled": Boolean(True), "filter": Boolean(True), }, "handles": { "enabled": Boolean(True), "filter": Boolean(True), }, "ldrmodules": { "enabled": Boolean(True), "filter": Boolean(True), }, "mutantscan": { "enabled": Boolean(True), "filter": Boolean(True), }, "devicetree": { "enabled": Boolean(True), "filter": Boolean(True), }, "svcscan": { "enabled": Boolean(True), "filter": Boolean(True), }, "modscan": { "enabled": Boolean(True), "filter": Boolean(True), }, "yarascan": { "enabled": Boolean(True), "filter": Boolean(True), }, "ssdt": { "enabled": Boolean(True), "filter": Boolean(True), }, "gdt": { "enabled": Boolean(True), "filter": Boolean(True), }, "sockscan": { "enabled": Boolean(True), "filter": Boolean(False), }, "netscan": { "enabled": Boolean(True), "filter": Boolean(False), }, "mask": { "enabled": Boolean(False), "pid_generic": List(String, None), }, }, "physical": { "physical": { "machines": List(String, "physical1"), "user": String("username"), "password": String("password", sanitize=True), "interface": String("eth0"), }, "fog": { "hostname": String("none"), "username": String("fog"), "password": String("password", sanitize=True), }, "*": { "__section__": "physical1", "label": String("physical1"), "platform": 
String("windows"), "ip": String("192.168.56.101"), "osprofile": String(required=False), }, "__star__": ("physical", "machines"), }, "processing": { "analysisinfo": { "enabled": Boolean(True), }, "apkinfo": { "enabled": Boolean(False), "decompilation_threshold": Int(5000000), }, "baseline": { "enabled": Boolean(False), }, "behavior": { "enabled": Boolean(True), }, "buffer": { "enabled": Boolean(True), }, "debug": { "enabled": Boolean(True), }, "droidmon": { "enabled": Boolean(False), }, "dropped": { "enabled": Boolean(True), }, "dumptls": { "enabled": Boolean(True), }, "extracted": { "enabled": Boolean(True, required=False), }, "googleplay": { "enabled": Boolean(False), "android_id": String(), "google_login": String(), "google_password": String(sanitize=True), }, "memory": { "enabled": Boolean(False), }, "misp": { "enabled": Boolean(False), "url": String(), "apikey": String(sanitize=True), "maxioc": Int(100), }, "network": { "enabled": Boolean(True), "whitelist_dns": Boolean(False), "allowed_dns": String(), }, "procmemory": { "enabled": Boolean(True), "idapro": Boolean(False), "extract_img": Boolean(True), "extract_dll": Boolean(False), "dump_delete": Boolean(False), }, "procmon": { "enabled": Boolean(True), }, "screenshots": { "enabled": Boolean(True), "tesseract": String("no"), }, "snort": { "enabled": Boolean(False), "snort": Path( "/usr/local/bin/snort", exists=False, writable=False, readable=True ), "conf": Path( "/etc/snort/snort.conf", exists=False, writable=False, readable=True ), }, "static": { "enabled": Boolean(True), "pdf_timeout": Int(60), }, "strings": { "enabled": Boolean(True), }, "suricata": { "enabled": Boolean(False), "suricata": Path( "/usr/bin/suricata", exists=True, writable=False, readable=True ), "conf": Path( "/etc/suricata/suricata.yaml", exists=True, writable=False, readable=True ), "eve_log": Path( "eve.json", exists=False, writable=True, readable=False ), "files_log": Path( "files-json.log", exists=False, writable=True, readable=False ), "files_dir": Path( "files", exists=False, writable=False, readable=True ), "socket": Path( exists=True, writable=False, readable=True, allow_empty=True ), }, "targetinfo": { "enabled": Boolean(True), }, "virustotal": { "enabled": Boolean(False), "timeout": Int(60), "scan": Boolean(False), "key": String("a0283a2c3d55728300d064874239b5346fb991317e8449fe43c902879d758088", sanitize=True), }, "irma": { "enabled": Boolean(False), "timeout": Int(60), "scan": Boolean(False), "force": Boolean(False), "url": String(), "probes": String(required=False), }, }, "qemu": { "qemu": { "path": Path( "/usr/bin/qemu-system-x86_64", exists=True, writable=False, readable=True ), "interface": String("qemubr"), "machines": List(String, "vm1,vm2,vm3"), }, "*": [ { "__section__": "vm1", "label": String("vm1"), "image": Path( "/home/rep/vms/qvm_wheezy64_1.qcow2", exists=True, writable=False, readable=True ), "snapshot": String(required=False), "arch": String(), "enable_kvm": Boolean(False), "platform": String("linux"), "ip": String("192.168.55.2"), "interface": String("qemubr"), "resultserver_ip": String("192.168.55.1"), "resultserver_port": Int(), "tags": String("debian_wheezy,64_bit"), "kernel": String(), "initrd": String(), "osprofile": String(required=False), }, { "__section__": "vm2", "label": String("vm2"), "image": Path( "/home/rep/vms/qvm_wheezy64_1.qcow2", exists=True, writable=False, readable=True ), "snapshot": String(required=False), "arch": String("mipsel"), "enable_kvm": Boolean(False), "platform": String("linux"), "ip": 
String("192.168.55.3"), "interface": String("qemubr"), "resultserver_ip": String("192.168.55.1"), "resultserver_port": Int(), "tags": String("debian_wheezy,mipsel"), "kernel": String( "{imagepath}/vmlinux-3.16.0-4-4kc-malta-mipsel" ), "osprofile": String(""), }, { "__section__": "vm3", "label": String("vm3"), "image": Path( "/home/rep/vms/qvm_wheezy64_1.qcow2", exists=True, writable=False, readable=True ), "snapshot": String(required=False), "arch": String("arm"), "enable_kvm": Boolean(False), "platform": String("linux"), "ip": String("192.168.55.4"), "interface": String("qemubr"), "tags": String("debian_wheezy,arm"), "kernel": String( "{imagepath}/vmlinuz-3.2.0-4-versatile-arm" ), "initrd": String( "{imagepath}/initrd-3.2.0-4-versatile-arm" ), "osprofile": String(""), }, ], "__star__": ("qemu", "machines"), }, "reporting": { "feedback": { "enabled": Boolean(False), }, "jsondump": { "enabled": Boolean(True), "indent": Int(4), "calls": Boolean(True), }, "singlefile": { "enabled": Boolean(False), "html": Boolean(False), "pdf": Boolean(False), }, "misp": { "enabled": Boolean(False), "url": String(), "apikey": String(sanitize=True), "mode": String("maldoc ipaddr hashes url"), }, "mongodb": { "enabled": Boolean(False), "host": String("127.0.0.1"), "port": Int(27017), "db": String("cuckoo"), "store_memdump": Boolean(True), "paginate": Int(100), "username": String(), "password": String(), }, "elasticsearch": { "enabled": Boolean(False), "hosts": List(String, "127.0.0.1"), "timeout": Int(300), "calls": Boolean(False), "index": String("cuckoo"), "index_time_pattern": String("yearly"), "cuckoo_node": String(), }, "moloch": { "enabled": Boolean(False), "host": String(), "insecure": Boolean(False), "moloch_capture": Path( "/data/moloch/bin/moloch-capture", exists=True, writable=False, readable=True ), "conf": Path( "/data/moloch/etc/config.ini", exists=True, writable=False, readable=True ), "instance": String("cuckoo"), }, "notification": { "enabled": Boolean(False), "url": String(), "identifier": String(), }, "mattermost": { "enabled": Boolean(False), "username": String("cuckoo"), "url": String(), "myurl": String(), "show_virustotal": Boolean(False), "show_signatures": Boolean(False), "show_urls": Boolean(False), "hash_filename": Boolean(False), "hash_url": Boolean(False), }, }, "routing": { "routing": { "route": String("none"), "internet": String("none"), "rt_table": String("main"), "auto_rt": Boolean(True), "drop": Boolean(False), }, "inetsim": { "enabled": Boolean(False), "server": String("192.168.56.1"), "ports": String(), }, "tor": { "enabled": Boolean(False), "dnsport": Int(5353), "proxyport": Int(9040), }, "vpn": { "enabled": Boolean(False), "vpns": List(String, "vpn0"), }, "*": { "__section__": "vpn0", "name": String("vpn0"), "description": String("Spain, Europe"), "interface": String("tun0"), "rt_table": String("tun0"), }, "__star__": ("vpn", "vpns"), }, "vmware": { "vmware": { "mode": String("gui"), "path": Path( "/usr/bin/vmrun", exists=True, writable=False, readable=True ), "interface": String("virbr0"), "machines": List(String, "cuckoo1"), }, "*": { "__section__": "cuckoo1", "vmx_path": Path( "../cuckoo1/cuckoo1.vmx", exists=True, writable=False, readable=True ), "snapshot": String("Snapshot1"), "platform": String("windows"), "ip": String("192.168.54.111"), "interface": String(), "resultserver_ip": String(), "resultserver_port": Int(), "tags": String(), "osprofile": String(required=False), }, "__star__": ("vmware", "machines"), }, "vsphere": { "vsphere": { "host": String("10.0.0.1"), 
"port": Int(443), "user": String("username_goes_here"), "pwd": String("password_goes_here", sanitize=True), "interface": String("eth0"), "machines": List(String, "analysis1"), "unverified_ssl": Boolean(False), }, "*": { "__section__": "analysis1", "label": String("cuckoo1"), "platform": String("windows"), "snapshot": String("snapshot_name"), "ip": String("192.168.122.101"), "interface": String(), "resultserver_ip": String(required=False), "resultserver_port": Int(required=False), "tags": String(required=False), "osprofile": String(required=False), }, "__star__": ("vsphere", "machines"), }, "xenserver": { "xenserver": { "user": String("root"), "password": String("changeme", sanitize=True), "url": String("https://xenserver"), "interface": String("virbr0"), "machines": List(String, "cuckoo1"), }, "*": { "__section__": "cuckoo1", "uuid": UUID("00000000-0000-0000-0000-000000000000"), "snapshot": String(), "platform": String("windows"), "ip": String("192.168.54.111"), "interface": String(), "resultserver_ip": String(), "resultserver_port": Int(), "tags": String(), "osprofile": String(required=False), }, "__star__": ("xenserver", "machines"), }, } def get_section_types(self, file_name, section, strict=False, loose=False): """Get types for a section entry.""" section_types = get_section_types(file_name, section) if not section_types and not loose: log.error( "Config section %s:%s not found!", file_name, section ) if strict: raise CuckooConfigurationError( "Config section %s:%s not found!", file_name, section ) return return section_types def __init__(self, file_name="cuckoo", cfg=None, strict=False, loose=False, raw=False): """ @param file_name: file name without extension. @param cfg: configuration file path. """ env = {} for key, value in os.environ.items(): if key.startswith("CUCKOO_"): env[key] = value env["CUCKOO_CWD"] = cwd() env["CUCKOO_APP"] = os.environ.get("CUCKOO_APP", "") config = ConfigParser.ConfigParser(env) self.env_keys = [] for key in env.keys(): self.env_keys.append(key.lower()) self.sections = {} try: config.read(cfg or cwd("conf", "%s.conf" % file_name)) except ConfigParser.ParsingError as e: raise CuckooConfigurationError( "There was an error reading in the $CWD/conf/%s.conf " "configuration file. Most likely there are leading " "whitespaces in front of one of the key=value lines defined. " "More information from the original exception: %s" % (file_name, e) ) if file_name not in self.configuration and not loose: log.error("Unknown config file %s.conf", file_name) return for section in config.sections(): types = self.get_section_types(file_name, section, strict, loose) if types is None: continue self.sections[section] = Dictionary() setattr(self, section, self.sections[section]) try: items = config.items(section) except ConfigParser.InterpolationMissingOptionError as e: log.error("Missing environment variable(s): %s", e) raise CuckooConfigurationError( "Missing environment variable: %s" % e ) except ValueError as e: if e.message == "incomplete format key": raise CuckooConfigurationError( "One of the fields that you've filled out in " "$CWD/conf/%s contains the sequence '%(' which is " "interpreted as environment variable sequence, e.g., " "'%(PGPASSWORD)s' would locate a PostgreSQL " "password. Please update the field to correctly " "state the environment variable or change it in a " "way that '%(' is no longer in the variable." ) raise for name, raw_value in items: if name in self.env_keys: continue if "\n" in raw_value: wrong_key = "???" 
try: wrong_key = raw_value.split("\n", 1)[1].split()[0] except: pass raise CuckooConfigurationError( "There was an error reading in the $CWD/conf/%s.conf " "configuration file. Namely, there are one or more " "leading whitespaces before the definition of the " "'%s' key/value pair in the '%s' section. Please " "remove those leading whitespaces as Python's default " "configuration parser is unable to handle those " "properly." % (file_name, wrong_key, section) ) if not raw and name in types: # TODO Is this the area where we should be checking the # configuration values? # if not types[name].check(raw_value): # print file_name, section, name, raw_value # raise value = types[name].parse(raw_value) else: if not loose: log.error( "Type of config parameter %s:%s:%s not found! " "This may indicate that you've incorrectly filled " "out the Cuckoo configuration, please double " "check it.", file_name, section, name ) value = raw_value self.sections[section][name] = value def get(self, section): """Get option. @param section: section to fetch. @raise CuckooConfigurationError: if section not found. @return: option value. """ if section not in self.sections: raise CuckooConfigurationError( "Option %s is not found in configuration" % section ) return self.sections[section] @staticmethod def from_confdir(dirpath, loose=False, sanitize=False): """Reads all the configuration from a configuration directory. If `sanitize` is set, then black out sensitive fields.""" ret = {} for filename in os.listdir(dirpath): if not filename.endswith(".conf"): continue config_name = filename.rsplit(".", 1)[0] cfg = Config( config_name, cfg=os.path.join(dirpath, filename), loose=loose ) ret[config_name] = {} for section, values in cfg.sections.items(): ret[config_name][section] = {} types = cfg.get_section_types( config_name, section, loose=loose ) or {} for key, value in values.items(): if sanitize and key in types and types[key].sanitize: value = "*"*8 ret[config_name][section][key] = value return ret def parse_options(options): """Parse the analysis options field to a dictionary.""" ret = {} for field in options.split(","): if "=" not in field: continue key, value = field.split("=", 1) ret[key.strip()] = value.strip() return ret def emit_options(options): """Emit the analysis options from a dictionary to a string.""" return ",".join("%s=%s" % (k, v) for k, v in sorted(options.items())) def config(s, cfg=None, strict=False, raw=False, loose=False, check=False): """Fetch a configuration value, denoted as file:section:key.""" if s.count(":") != 2: raise RuntimeError("Invalid configuration entry: %s" % s) file_name, section, key = s.split(":") if check: strict = raw = loose = True type_ = Config.configuration.get(file_name, {}).get(section, {}).get(key) if strict and type_ is None: raise CuckooConfigurationError( "No such configuration value exists: %s" % s ) required = type_ is not None and type_.required index = file_name, cfg, cwd(), strict, raw, loose if index not in _cache: _cache[index] = Config( file_name, cfg=cfg, strict=strict, raw=raw, loose=loose ) config = _cache[index] if strict and required and section not in config.sections: raise CuckooConfigurationError( "Configuration value %s not present! This may indicate that " "you've incorrectly filled out the Cuckoo configuration, " "please double check it." % s ) section = config.sections.get(section, {}) if strict and required and key not in section: raise CuckooConfigurationError( "Configuration value %s not present! 
This may indicate that " "you've incorrectly filled out the Cuckoo configuration, " "please double check it." % s ) value = section.get(key, type_.default if type_ else None) if check and not type_.check(value): raise CuckooConfigurationError( "The configuration value %r found for %s is invalid. Please " "update your configuration!" % (value, s) ) return value def get_section_types(file_name, section, strict=False): if section in Config.configuration.get(file_name, {}): return Config.configuration[file_name][section] if "__star__" not in Config.configuration.get(file_name, {}): return {} if strict: section_, key = Config.configuration[file_name]["__star__"] if section not in config("%s:%s:%s" % (file_name, section_, key)): return {} if "*" in Config.configuration.get(file_name, {}): section_types = Config.configuration[file_name]["*"] # If multiple default values have been provided, pick one. if isinstance(section_types, (tuple, list)): section_types = section_types[0] return section_types return {} def config2(file_name, section): keys = get_section_types(file_name, section, strict=True) if not keys: raise CuckooConfigurationError( "No such configuration section exists: %s:%s" % (file_name, section) ) ret = Dictionary() for key in keys: if key == "__star__" or key == "*": continue ret[key] = config("%s:%s:%s" % (file_name, section, key)) return ret def cast(s, value): """Cast a configuration value as per its type.""" if s.count(":") != 2: raise RuntimeError("Invalid configuration entry: %s" % s) file_name, section, key = s.split(":") type_ = get_section_types(file_name, section).get(key) if type_ is None: raise CuckooConfigurationError( "No such configuration value exists: %s" % s ) return type_.parse(value) def read_kv_conf(filepath): """Reads a flat Cuckoo key/value configuration file.""" ret = {} for line in open(filepath, "rb"): line = line.strip() if not line or line.startswith("#"): continue if "=" not in line: raise CuckooConfigurationError( "Invalid flat configuration line: %s (missing '=' character)" % line ) key, raw_value = line.split("=", 1) key, raw_value = key.replace(".", ":").strip(), raw_value.strip() try: value = cast(key, raw_value) except (CuckooConfigurationError, RuntimeError) as e: raise CuckooConfigurationError( "Invalid flat configuration line: %s (error %s)" % (line, e) ) if raw_value and value is None: raise CuckooConfigurationError( "Invalid flat configuration entry: %s is None" % key ) a, b, c = key.split(":") ret[a] = ret.get(a, {}) ret[a][b] = ret[a].get(b, {}) ret[a][b][c] = value return ret
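

# A small usage sketch, illustrative only and not shipped with Cuckoo,
# exercising the helpers defined above.  parse_options()/emit_options()
# round-trip the comma-separated "key=value" analysis options string, cast()
# coerces a raw string through the Type instance registered for a
# "file:section:key" path, and per-machine sections fall back to the "*"
# template declared for their machinery.
if __name__ == "__main__":
    opts = parse_options("free=yes,procmemdump=1")
    assert opts == {"free": "yes", "procmemdump": "1"}
    assert emit_options(opts) == "free=yes,procmemdump=1"

    # Int(2042) is declared for cuckoo:resultserver:port, so the raw string
    # is parsed into an integer.
    assert cast("cuckoo:resultserver:port", "2042") == 2042

    # "virtualbox:cuckoo1" is not declared explicitly, so its types come from
    # the "*" section of the virtualbox configuration.
    assert "label" in get_section_types("virtualbox", "cuckoo1")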
# -*- coding: utf-8 -*- """ Testing using the Test Client The test client is a class that can act like a simple browser for testing purposes. It allows the user to compose GET and POST requests, and obtain the response that the server gave to those requests. The server Response objects are annotated with the details of the contexts and templates that were rendered during the process of serving the request. ``Client`` objects are stateful - they will retain cookie (and thus session) details for the lifetime of the ``Client`` instance. This is not intended as a replacement for Twill, Selenium, or other browser automation frameworks - it is here to allow testing against the contexts and templates produced by a view, rather than the HTML rendered to the end-user. """ from __future__ import unicode_literals from django.contrib.auth.models import User from django.core import mail from django.http import HttpResponse from django.test import ( Client, RequestFactory, SimpleTestCase, TestCase, override_settings, ) from django.urls import reverse_lazy from .views import get_view, post_view, trace_view @override_settings(ROOT_URLCONF='test_client.urls') class ClientTest(TestCase): @classmethod def setUpTestData(cls): cls.u1 = User.objects.create_user(username='testclient', password='password') cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False) def test_get_view(self): "GET a view" # The data is ignored, but let's check it doesn't crash the system # anyway. data = {'var': '\xf2'} response = self.client.get('/get_view/', data) # Check some response details self.assertContains(response, 'This is a test') self.assertEqual(response.context['var'], '\xf2') self.assertEqual(response.templates[0].name, 'GET Template') def test_get_post_view(self): "GET a view that normally expects POSTs" response = self.client.get('/post_view/', {}) # Check some response details self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, 'Empty GET Template') self.assertTemplateUsed(response, 'Empty GET Template') self.assertTemplateNotUsed(response, 'Empty POST Template') def test_empty_post(self): "POST an empty dictionary to a view" response = self.client.post('/post_view/', {}) # Check some response details self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, 'Empty POST Template') self.assertTemplateNotUsed(response, 'Empty GET Template') self.assertTemplateUsed(response, 'Empty POST Template') def test_post(self): "POST some data to a view" post_data = { 'value': 37 } response = self.client.post('/post_view/', post_data) # Check some response details self.assertEqual(response.status_code, 200) self.assertEqual(response.context['data'], '37') self.assertEqual(response.templates[0].name, 'POST Template') self.assertContains(response, 'Data received') def test_trace(self): """TRACE a view""" response = self.client.trace('/trace_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['method'], 'TRACE') self.assertEqual(response.templates[0].name, 'TRACE Template') def test_response_headers(self): "Check the value of HTTP headers returned in a response" response = self.client.get("/header_view/") self.assertEqual(response['X-DJANGO-TEST'], 'Slartibartfast') def test_response_attached_request(self): """ Check that the returned response has a ``request`` attribute with the originating environ dict and a ``wsgi_request`` with the originating ``WSGIRequest`` instance. 
""" response = self.client.get("/header_view/") self.assertTrue(hasattr(response, 'request')) self.assertTrue(hasattr(response, 'wsgi_request')) for key, value in response.request.items(): self.assertIn(key, response.wsgi_request.environ) self.assertEqual(response.wsgi_request.environ[key], value) def test_response_resolver_match(self): """ The response contains a ResolverMatch instance. """ response = self.client.get('/header_view/') self.assertTrue(hasattr(response, 'resolver_match')) def test_response_resolver_match_redirect_follow(self): """ The response ResolverMatch instance contains the correct information when following redirects. """ response = self.client.get('/redirect_view/', follow=True) self.assertEqual(response.resolver_match.url_name, 'get_view') def test_response_resolver_match_regular_view(self): """ The response ResolverMatch instance contains the correct information when accessing a regular view. """ response = self.client.get('/get_view/') self.assertEqual(response.resolver_match.url_name, 'get_view') def test_raw_post(self): "POST raw data (with a content type) to a view" test_doc = """<?xml version="1.0" encoding="utf-8"?> <library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library> """ response = self.client.post("/raw_post_view/", test_doc, content_type="text/xml") self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, "Book template") self.assertEqual(response.content, b"Blink - Malcolm Gladwell") def test_insecure(self): "GET a URL through http" response = self.client.get('/secure_view/', secure=False) self.assertFalse(response.test_was_secure_request) self.assertEqual(response.test_server_port, '80') def test_secure(self): "GET a URL through https" response = self.client.get('/secure_view/', secure=True) self.assertTrue(response.test_was_secure_request) self.assertEqual(response.test_server_port, '443') def test_redirect(self): "GET a URL that redirects elsewhere" response = self.client.get('/redirect_view/') # Check that the response was a 302 (redirect) self.assertRedirects(response, '/get_view/') def test_redirect_with_query(self): "GET a URL that redirects with given GET parameters" response = self.client.get('/redirect_view/', {'var': 'value'}) # Check if parameters are intact self.assertRedirects(response, '/get_view/?var=value') def test_permanent_redirect(self): "GET a URL that redirects permanently elsewhere" response = self.client.get('/permanent_redirect_view/') # Check that the response was a 301 (permanent redirect) self.assertRedirects(response, '/get_view/', status_code=301) def test_temporary_redirect(self): "GET a URL that does a non-permanent redirect" response = self.client.get('/temporary_redirect_view/') # Check that the response was a 302 (non-permanent redirect) self.assertRedirects(response, '/get_view/', status_code=302) def test_redirect_to_strange_location(self): "GET a URL that redirects to a non-200 page" response = self.client.get('/double_redirect_view/') # Check that the response was a 302, and that # the attempt to get the redirection location returned 301 when retrieved self.assertRedirects(response, '/permanent_redirect_view/', target_status_code=301) def test_follow_redirect(self): "A URL that redirects can be followed to termination." 
response = self.client.get('/double_redirect_view/', follow=True) self.assertRedirects(response, '/get_view/', status_code=302, target_status_code=200) self.assertEqual(len(response.redirect_chain), 2) def test_follow_relative_redirect(self): "A URL with a relative redirect can be followed." response = self.client.get('/accounts/', follow=True) self.assertEqual(response.status_code, 200) self.assertEqual(response.request['PATH_INFO'], '/accounts/login/') def test_follow_relative_redirect_no_trailing_slash(self): "A URL with a relative redirect with no trailing slash can be followed." response = self.client.get('/accounts/no_trailing_slash', follow=True) self.assertEqual(response.status_code, 200) self.assertEqual(response.request['PATH_INFO'], '/accounts/login/') def test_redirect_http(self): "GET a URL that redirects to an http URI" response = self.client.get('/http_redirect_view/', follow=True) self.assertFalse(response.test_was_secure_request) def test_redirect_https(self): "GET a URL that redirects to an https URI" response = self.client.get('/https_redirect_view/', follow=True) self.assertTrue(response.test_was_secure_request) def test_notfound_response(self): "GET a URL that responds as '404:Not Found'" response = self.client.get('/bad_view/') # Check that the response was a 404, and that the content contains MAGIC self.assertContains(response, 'MAGIC', status_code=404) def test_valid_form(self): "POST valid data to a form" post_data = { 'text': 'Hello World', 'email': 'foo@example.com', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Valid POST Template") def test_valid_form_with_hints(self): "GET a form, providing hints in the GET data" hints = { 'text': 'Hello World', 'multi': ('b', 'c', 'e') } response = self.client.get('/form_view/', data=hints) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Form GET Template") # Check that the multi-value data has been rolled out ok self.assertContains(response, 'Select a valid choice.', 0) def test_incomplete_data_form(self): "POST incomplete data to a form" post_data = { 'text': 'Hello World', 'value': 37 } response = self.client.post('/form_view/', post_data) self.assertContains(response, 'This field is required.', 3) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") self.assertFormError(response, 'form', 'email', 'This field is required.') self.assertFormError(response, 'form', 'single', 'This field is required.') self.assertFormError(response, 'form', 'multi', 'This field is required.') def test_form_error(self): "POST erroneous data to a form" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") self.assertFormError(response, 'form', 'email', 'Enter a valid email address.') def test_valid_form_with_template(self): "POST valid data to a form using multiple templates" post_data = { 'text': 'Hello World', 'email': 'foo@example.com', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view_with_template/', post_data) self.assertContains(response, 'POST data OK') self.assertTemplateUsed(response, "form_view.html") self.assertTemplateUsed(response, 'base.html') 
self.assertTemplateNotUsed(response, "Valid POST Template") def test_incomplete_data_form_with_template(self): "POST incomplete data to a form using multiple templates" post_data = { 'text': 'Hello World', 'value': 37 } response = self.client.post('/form_view_with_template/', post_data) self.assertContains(response, 'POST data has errors') self.assertTemplateUsed(response, 'form_view.html') self.assertTemplateUsed(response, 'base.html') self.assertTemplateNotUsed(response, "Invalid POST Template") self.assertFormError(response, 'form', 'email', 'This field is required.') self.assertFormError(response, 'form', 'single', 'This field is required.') self.assertFormError(response, 'form', 'multi', 'This field is required.') def test_form_error_with_template(self): "POST erroneous data to a form using multiple templates" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view_with_template/', post_data) self.assertContains(response, 'POST data has errors') self.assertTemplateUsed(response, "form_view.html") self.assertTemplateUsed(response, 'base.html') self.assertTemplateNotUsed(response, "Invalid POST Template") self.assertFormError(response, 'form', 'email', 'Enter a valid email address.') def test_unknown_page(self): "GET an invalid URL" response = self.client.get('/unknown_view/') # Check that the response was a 404 self.assertEqual(response.status_code, 404) def test_url_parameters(self): "Make sure that URL ;-parameters are not stripped." response = self.client.get('/unknown_view/;some-parameter') # Check that the path in the response includes it (ignore that it's a 404) self.assertEqual(response.request['PATH_INFO'], '/unknown_view/;some-parameter') def test_view_with_login(self): "Request a page that is protected with @login_required" # Get the page without logging in. Should result in 302. response = self.client.get('/login_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/') # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') @override_settings( INSTALLED_APPS=['django.contrib.auth'], SESSION_ENGINE='django.contrib.sessions.backends.file', ) def test_view_with_login_when_sessions_app_is_not_installed(self): self.test_view_with_login() def test_view_with_force_login(self): "Request a page that is protected with @login_required" # Get the page without logging in. Should result in 302. response = self.client.get('/login_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/') # Log in self.client.force_login(self.u1) # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') def test_view_with_method_login(self): "Request a page that is protected with a @login_required method" # Get the page without logging in. Should result in 302. 
response = self.client.get('/login_protected_method_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/') # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Request a page that requires a login response = self.client.get('/login_protected_method_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') def test_view_with_method_force_login(self): "Request a page that is protected with a @login_required method" # Get the page without logging in. Should result in 302. response = self.client.get('/login_protected_method_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_method_view/') # Log in self.client.force_login(self.u1) # Request a page that requires a login response = self.client.get('/login_protected_method_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') def test_view_with_login_and_custom_redirect(self): "Request a page that is protected with @login_required(redirect_field_name='redirect_to')" # Get the page without logging in. Should result in 302. response = self.client.get('/login_protected_view_custom_redirect/') self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/') # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Request a page that requires a login response = self.client.get('/login_protected_view_custom_redirect/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') def test_view_with_force_login_and_custom_redirect(self): """ Request a page that is protected with @login_required(redirect_field_name='redirect_to') """ # Get the page without logging in. Should result in 302. response = self.client.get('/login_protected_view_custom_redirect/') self.assertRedirects(response, '/accounts/login/?redirect_to=/login_protected_view_custom_redirect/') # Log in self.client.force_login(self.u1) # Request a page that requires a login response = self.client.get('/login_protected_view_custom_redirect/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') def test_view_with_bad_login(self): "Request a page that is protected with @login, but use bad credentials" login = self.client.login(username='otheruser', password='nopassword') self.assertFalse(login) def test_view_with_inactive_login(self): """ An inactive user may login if the authenticate backend allows it. """ credentials = {'username': 'inactive', 'password': 'password'} self.assertFalse(self.client.login(**credentials)) with self.settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend']): self.assertTrue(self.client.login(**credentials)) @override_settings( AUTHENTICATION_BACKENDS=[ 'django.contrib.auth.backends.ModelBackend', 'django.contrib.auth.backends.AllowAllUsersModelBackend', ] ) def test_view_with_inactive_force_login(self): "Request a page that is protected with @login, but use an inactive login" # Get the page without logging in. Should result in 302. 
response = self.client.get('/login_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/') # Log in self.client.force_login(self.u2, backend='django.contrib.auth.backends.AllowAllUsersModelBackend') # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'inactive') def test_logout(self): "Request a logout after logging in" # Log in self.client.login(username='testclient', password='password') # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') # Log out self.client.logout() # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/') def test_logout_with_force_login(self): "Request a logout after logging in" # Log in self.client.force_login(self.u1) # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') # Log out self.client.logout() # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/') @override_settings( AUTHENTICATION_BACKENDS=[ 'django.contrib.auth.backends.ModelBackend', 'test_client.auth_backends.TestClientBackend', ], ) def test_force_login_with_backend(self): """ Request a page that is protected with @login_required when using force_login() and passing a backend. """ # Get the page without logging in. Should result in 302. response = self.client.get('/login_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/login_protected_view/') # Log in self.client.force_login(self.u1, backend='test_client.auth_backends.TestClientBackend') self.assertEqual(self.u1.backend, 'test_client.auth_backends.TestClientBackend') # Request a page that requires a login response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') @override_settings( AUTHENTICATION_BACKENDS=[ 'django.contrib.auth.backends.ModelBackend', 'test_client.auth_backends.TestClientBackend', ], ) def test_force_login_without_backend(self): """ force_login() without passing a backend and with multiple backends configured should automatically use the first backend. """ self.client.force_login(self.u1) response = self.client.get('/login_protected_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') self.assertEqual(self.u1.backend, 'django.contrib.auth.backends.ModelBackend') @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies") def test_logout_cookie_sessions(self): self.test_logout() def test_view_with_permissions(self): "Request a page that is protected with @permission_required" # Get the page without logging in. Should result in 302. response = self.client.get('/permission_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/') # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Log in with wrong permissions. 
Should result in 302. response = self.client.get('/permission_protected_view/') self.assertRedirects(response, '/accounts/login/?next=/permission_protected_view/') # TODO: Log in with right permissions and request the page again def test_view_with_permissions_exception(self): "Request a page that is protected with @permission_required but raises an exception" # Get the page without logging in. Should result in 403. response = self.client.get('/permission_protected_view_exception/') self.assertEqual(response.status_code, 403) # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Log in with wrong permissions. Should result in 403. response = self.client.get('/permission_protected_view_exception/') self.assertEqual(response.status_code, 403) def test_view_with_method_permissions(self): "Request a page that is protected with a @permission_required method" # Get the page without logging in. Should result in 302. response = self.client.get('/permission_protected_method_view/') self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/') # Log in login = self.client.login(username='testclient', password='password') self.assertTrue(login, 'Could not log in') # Log in with wrong permissions. Should result in 302. response = self.client.get('/permission_protected_method_view/') self.assertRedirects(response, '/accounts/login/?next=/permission_protected_method_view/') # TODO: Log in with right permissions and request the page again def test_external_redirect(self): response = self.client.get('/django_project_redirect/') self.assertRedirects(response, 'https://www.djangoproject.com/', fetch_redirect_response=False) def test_external_redirect_with_fetch_error_msg(self): """ Check that assertRedirects without fetch_redirect_response=False raises a relevant ValueError rather than a non-descript AssertionError. """ response = self.client.get('/django_project_redirect/') msg = ( "The test client is unable to fetch remote URLs (got " "https://www.djangoproject.com/). If the host is served by Django, " "add 'www.djangoproject.com' to ALLOWED_HOSTS. " "Otherwise, use assertRedirects(..., fetch_redirect_response=False)." 
) with self.assertRaisesMessage(ValueError, msg): self.assertRedirects(response, 'https://www.djangoproject.com/') def test_session_modifying_view(self): "Request a page that modifies the session" # Session value isn't set initially with self.assertRaises(KeyError): self.client.session['tobacconist'] self.client.post('/session_view/') # Check that the session was modified self.assertEqual(self.client.session['tobacconist'], 'hovercraft') @override_settings( INSTALLED_APPS=[], SESSION_ENGINE='django.contrib.sessions.backends.file', ) def test_sessions_app_is_not_installed(self): self.test_session_modifying_view() @override_settings( INSTALLED_APPS=[], SESSION_ENGINE='django.contrib.sessions.backends.nonexistent', ) def test_session_engine_is_invalid(self): with self.assertRaisesMessage(ImportError, 'nonexistent'): self.test_session_modifying_view() def test_view_with_exception(self): "Request a page that is known to throw an error" with self.assertRaises(KeyError): self.client.get("/broken_view/") def test_mail_sending(self): "Test that mail is redirected to a dummy outbox during test setup" response = self.client.get('/mail_sending_view/') self.assertEqual(response.status_code, 200) self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, 'Test message') self.assertEqual(mail.outbox[0].body, 'This is a test email') self.assertEqual(mail.outbox[0].from_email, 'from@example.com') self.assertEqual(mail.outbox[0].to[0], 'first@example.com') self.assertEqual(mail.outbox[0].to[1], 'second@example.com') def test_reverse_lazy_decodes(self): "Ensure reverse_lazy works in the test client" data = {'var': 'data'} response = self.client.get(reverse_lazy('get_view'), data) # Check some response details self.assertContains(response, 'This is a test') def test_relative_redirect(self): response = self.client.get('/accounts/') self.assertRedirects(response, '/accounts/login/') def test_relative_redirect_no_trailing_slash(self): response = self.client.get('/accounts/no_trailing_slash') self.assertRedirects(response, '/accounts/login/') def test_mass_mail_sending(self): "Test that mass mail is redirected to a dummy outbox during test setup" response = self.client.get('/mass_mail_sending_view/') self.assertEqual(response.status_code, 200) self.assertEqual(len(mail.outbox), 2) self.assertEqual(mail.outbox[0].subject, 'First Test message') self.assertEqual(mail.outbox[0].body, 'This is the first test email') self.assertEqual(mail.outbox[0].from_email, 'from@example.com') self.assertEqual(mail.outbox[0].to[0], 'first@example.com') self.assertEqual(mail.outbox[0].to[1], 'second@example.com') self.assertEqual(mail.outbox[1].subject, 'Second Test message') self.assertEqual(mail.outbox[1].body, 'This is the second test email') self.assertEqual(mail.outbox[1].from_email, 'from@example.com') self.assertEqual(mail.outbox[1].to[0], 'second@example.com') self.assertEqual(mail.outbox[1].to[1], 'third@example.com') def test_exception_following_nested_client_request(self): """ A nested test client request shouldn't clobber exception signals from the outer client request. 
""" with self.assertRaisesMessage(Exception, 'exception message'): self.client.get('/nesting_exception_view/') @override_settings( MIDDLEWARE=['django.middleware.csrf.CsrfViewMiddleware'], ROOT_URLCONF='test_client.urls', ) class CSRFEnabledClientTests(SimpleTestCase): def test_csrf_enabled_client(self): "A client can be instantiated with CSRF checks enabled" csrf_client = Client(enforce_csrf_checks=True) # The normal client allows the post response = self.client.post('/post_view/', {}) self.assertEqual(response.status_code, 200) # The CSRF-enabled client rejects it response = csrf_client.post('/post_view/', {}) self.assertEqual(response.status_code, 403) class CustomTestClient(Client): i_am_customized = "Yes" class CustomTestClientTest(SimpleTestCase): client_class = CustomTestClient def test_custom_test_client(self): """A test case can specify a custom class for self.client.""" self.assertIs(hasattr(self.client, "i_am_customized"), True) def _generic_view(request): return HttpResponse(status=200) @override_settings(ROOT_URLCONF='test_client.urls') class RequestFactoryTest(SimpleTestCase): """Tests for the request factory.""" # A mapping between names of HTTP/1.1 methods and their test views. http_methods_and_views = ( ('get', get_view), ('post', post_view), ('put', _generic_view), ('patch', _generic_view), ('delete', _generic_view), ('head', _generic_view), ('options', _generic_view), ('trace', trace_view), ) def setUp(self): self.request_factory = RequestFactory() def test_request_factory(self): """The request factory implements all the HTTP/1.1 methods.""" for method_name, view in self.http_methods_and_views: method = getattr(self.request_factory, method_name) request = method('/somewhere/') response = view(request) self.assertEqual(response.status_code, 200) def test_get_request_from_factory(self): """ The request factory returns a templated response for a GET request. """ request = self.request_factory.get('/somewhere/') response = get_view(request) self.assertContains(response, 'This is a test') def test_trace_request_from_factory(self): """The request factory returns an echo response for a TRACE request.""" url_path = '/somewhere/' request = self.request_factory.trace(url_path) response = trace_view(request) protocol = request.META["SERVER_PROTOCOL"] echoed_request_line = "TRACE {} {}".format(url_path, protocol) self.assertContains(response, echoed_request_line)
''' LICENSING ------------------------------------------------- golix: A python library for Golix protocol object manipulation. Copyright (C) 2016 Muterra, Inc. Contributors ------------ Nick Badger badg@muterra.io | badg@nickbadger.com | nickbadger.com This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ------------------------------------------------------ A NOTE ON RANDOM NUMBERS... PyCryptoDome sources randomness from os.urandom(). This should be secure for most applications. HOWEVER, if your system is low on entropy (can be an issue in high-demand applications like servers), urandom *will not block to wait for entropy*, and will revert (ish?) to potentially insufficiently secure pseudorandom generation. In that case, it might be better to source from elsewhere (like a hardware RNG). Some initial temporary thoughts: 1. Need to refactor signing, etc into identities. 2. Identity base class should declare supported cipher suites as a set 3. Each identity class should += the set with their support, allowing for easy multi-inheritance for multiple identity support 4. Identities then insert the author into the file 5. How does this interact with asymmetric objects with symmetric sigs? Should just look for an instance of the object? It would be nice to totally factor crypto awareness out of the objects entirely, except (of course) for address algorithms. 6. From within python, should the identies be forced to ONLY support a single ciphersuite? That would certainly make life easier. A LOT easier. Yeah, let's do that then. Multi-CS identities can multi-subclass, and will need to add some kind of glue code for key reuse. Deal with that later, but it'll probably entail backwards-incompatible changes. 7. Then, the identities should also generate secrets. That will also remove people from screwing up and using ex. random.random(). But what to do with the API for that? Should identity.finalize(obj) return (key, obj) pair or something? That's not going to be useful for all objects though, because not all objects use secrets. Really, the question is, how to handle GEOCs in a way that makes sense? Maybe add an Identity.secrets(ghid) attribute or summat? Though returning just the bytes would be really unfortunate for app development, because you'd have to unpack the generated bytes to figure out the ghid. What about returning a namedtuple, and adding a field for secrets in the GEOC? that might be something to add to the actual objects (ex GEOC) instead of the identity. That would also reduce the burden on identities for state management of generated objects, which should really be handled at a higher level than this library. 8. 
Algorithm precedence order should be defined globally, but capable of being overwritten ''' # Global dependencies import abc import os from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives import hmac from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives import ciphers from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives.kdf import hkdf from cryptography.exceptions import InvalidSignature from cryptography.hazmat.backends import default_backend from donna25519 import PrivateKey as ECDHPrivate from donna25519 import PublicKey as ECDHPublic from smartyparse import ParseError # Interpackage dependencies from .exceptions import SecurityError from .utils import Ghid from .utils import _dummy_ghid from .crypto_utils import ADDRESS_ALGOS from .crypto_utils import Secret from .crypto_utils import AsymHandshake from .crypto_utils import AsymAck from .crypto_utils import AsymNak from .crypto_utils import _dummy_asym from .crypto_utils import _dummy_mac from .crypto_utils import _dummy_signature from .crypto_utils import _dummy_address from .crypto_utils import _dummy_pubkey from .crypto_utils import _dummy_pubkey_exchange from ._getlow import GIDC from ._getlow import GEOC from ._getlow import GOBS from ._getlow import GOBD from ._getlow import GDXX from ._getlow import GARQ from ._getlow import GARQHandshake from ._getlow import GARQAck from ._getlow import GARQNak # Some globals CRYPTO_BACKEND = default_backend() DEFAULT_ADDRESSER = 1 DEFAULT_CIPHER = 1 # Control * imports __all__ = [ 'FirstParty1', 'SecondParty1', 'ThirdParty1' ] # Some utilities class _NoopSHA512(hashes.SHA512): def __init__(self, noopdata, *args, **kwargs): self.__data = noopdata super().__init__(*args, **kwargs) self.algorithm = self def copy(self): ''' Total NOOP, because self cannot change. ''' return self def update(self, data): # Noop noop noop pass def finalize(self): # Yay we get to do something! return self.__data class _IdentityBase(metaclass=abc.ABCMeta): def __init__(self, keys, ghid): self._ghid = ghid try: self._signature_key = keys['signature'] self._encryption_key = keys['encryption'] self._exchange_key = keys['exchange'] except (KeyError, TypeError) as e: raise RuntimeError( 'Generating ID from existing keys requires dict-like obj ' 'with "signature", "encryption", and "exchange" keys.' ) from e @property def ghid(self): return self._ghid @property def ciphersuite(self): return self._ciphersuite @classmethod def _dispatch_address(cls, address_algo): if address_algo == 'default': address_algo = cls.DEFAULT_ADDRESS_ALGO elif address_algo not in ADDRESS_ALGOS: raise ValueError( 'Address algorithm unavailable for use: ' + str(address_algo) ) return address_algo @classmethod def _typecheck_secret(cls, secret): # Awkward but gets the job done if not isinstance(secret, Secret): return False if secret.cipher != cls._ciphersuite: return False return True class _ObjectHandlerBase(metaclass=abc.ABCMeta): ''' Base class for anything that needs to unpack Golix objects. 
''' @staticmethod def unpack_identity(packed): gidc = GIDC.unpack(packed) return gidc @staticmethod def unpack_container(packed): geoc = GEOC.unpack(packed) return geoc @staticmethod def unpack_bind_static(packed): gobs = GOBS.unpack(packed) return gobs @staticmethod def unpack_bind_dynamic(packed): gobd = GOBD.unpack(packed) return gobd @staticmethod def unpack_debind(packed): gdxx = GDXX.unpack(packed) return gdxx @staticmethod @abc.abstractmethod def unpack_request(packed): ''' Unpacks requests. Different for firstparties and thirdparties, but used by both in unpack_any. ''' pass def unpack_any(self, packed): ''' Try to unpack using any available parser. Raises TypeError if no parser is found. ''' for parser in (self.unpack_identity, self.unpack_container, self.unpack_bind_static, self.unpack_bind_dynamic, self.unpack_debind, self.unpack_request): try: obj = parser(packed) # Hm, don't really like this. except (ParseError, TypeError): pass else: break else: raise ParseError( 'Packed data does not appear to be a Golix object.' ) return obj class _SecondPartyBase(metaclass=abc.ABCMeta): @classmethod def from_keys(cls, keys, address_algo): ''' Creates a secondparty from unpacked keys -- DON'T use this if you have an existing MIDC. ''' try: # Turn them into bytes first. packed_keys = cls._pack_keys(keys) except (KeyError, TypeError) as e: raise RuntimeError( 'Generating ID from existing keys requires dict-like obj ' 'with "signature", "encryption", and "exchange" keys.' ) from e gidc = GIDC( signature_key=packed_keys['signature'], encryption_key=packed_keys['encryption'], exchange_key=packed_keys['exchange'] ) gidc.pack(cipher=cls._ciphersuite, address_algo=address_algo) ghid = gidc.ghid self = cls(keys=keys, ghid=ghid) self.packed = gidc.packed return self @classmethod def from_identity(cls, gidc): ''' Loads an unpacked gidc into a SecondParty. Note that this does not select the correct SecondParty for any given gidc's ciphersuite. ''' ghid = gidc.ghid keys = cls._unpack_keys({ 'signature': gidc.signature_key, 'encryption': gidc.encryption_key, 'exchange': gidc.exchange_key }) self = cls(keys=keys, ghid=ghid) return self @classmethod def from_packed(cls, packed): ''' Loads a packed gidc into a SecondParty. Also does not select the correct SecondParty for the packed gidc's ciphersuite. ''' gidc = _ObjectHandlerBase.unpack_identity(packed) self = cls.from_identity(gidc) self.packed = packed return self @classmethod @abc.abstractmethod def _pack_keys(cls, keys): ''' Convert self.keys from objects used for crypto operations into bytes-like objects suitable for output into a GIDC. ''' pass @classmethod @abc.abstractmethod def _unpack_keys(cls, keys): ''' Convert keys dic into objects used for crypto operations from bytes-like objects used in GIDC. ''' pass class _FirstPartyBase(_ObjectHandlerBase, metaclass=abc.ABCMeta): DEFAULT_ADDRESS_ALGO = DEFAULT_ADDRESSER def __init__(self, keys=None, ghid=None, address_algo='default', *args, **kwargs): self.address_algo = self._dispatch_address(address_algo) # Load an existing identity if keys is not None and ghid is not None: self._second_party = self._generate_second_party( keys, self.address_algo ) # Catch any improper declaration elif keys is not None or ghid is not None: raise TypeError( 'Generating an ID manually from existing keys requires ' 'both keys and ghid.' 
) # Generate a new identity else: keys = self._generate_keys() self._second_party = self._generate_second_party( keys, self.address_algo ) ghid = self._second_party.ghid # Now dispatch super() with the adjusted keys, ghid super().__init__(keys=keys, ghid=ghid, *args, **kwargs) @classmethod def _typecheck_2ndparty(cls, obj): # Type check the partner. Must be SecondPartyX or similar. if not isinstance(obj, cls._2PID): raise TypeError( 'Object must be a SecondParty of compatible type ' 'with the FirstParty initiating the request/ack/nak.' ) else: return True @property def second_party(self): # Note: this is going to error out if we're loading an identity, since # we're not currently passing in the packed identity. return self._second_party def make_container(self, secret, plaintext): if not self._typecheck_secret(secret): raise TypeError( 'Secret must be a properly-formatted Secret compatible with ' 'the current identity\'s declared ciphersuite.' ) geoc = GEOC(author=self.ghid) geoc.payload = self._encrypt(secret, plaintext) geoc.pack(cipher=self.ciphersuite, address_algo=self.address_algo) signature = self._sign(geoc.ghid.address) geoc.pack_signature(signature) return geoc def make_bind_static(self, target): gobs = GOBS( binder = self.ghid, target = target ) gobs.pack(cipher=self.ciphersuite, address_algo=self.address_algo) signature = self._sign(gobs.ghid.address) gobs.pack_signature(signature) return gobs def make_bind_dynamic(self, counter, target_vector, ghid_dynamic=None): gobd = GOBD( binder = self.ghid, counter = counter, target_vector = target_vector, ghid_dynamic = ghid_dynamic ) gobd.pack(cipher=self.ciphersuite, address_algo=self.address_algo) signature = self._sign(gobd.ghid.address) gobd.pack_signature(signature) return gobd def make_debind(self, target): gdxx = GDXX( debinder = self.ghid, target = target ) gdxx.pack(cipher=self.ciphersuite, address_algo=self.address_algo) signature = self._sign(gdxx.ghid.address) gdxx.pack_signature(signature) return gdxx def make_handshake(self, secret, target): return AsymHandshake( author = self.ghid, target = target, secret = secret ) def make_ack(self, target, status=0): return AsymAck( author = self.ghid, target = target, status = status ) def make_nak(self, target, status=0): return AsymNak( author = self.ghid, target = target, status = status ) def make_request(self, recipient, request): self._typecheck_2ndparty(recipient) # I'm actually okay with this performance hit, since it forces some # level of type checking here. Which is, I think, in this case, good. if isinstance(request, AsymHandshake): request = GARQHandshake( author = request.author, target = request.target, secret = request.secret ) elif isinstance(request, AsymAck): request = GARQAck( author = request.author, target = request.target, status = request.status ) elif isinstance(request, AsymNak): request = GARQNak( author = request.author, target = request.target, status = request.status ) else: raise TypeError( 'Request must be an AsymHandshake, AsymAck, or AsymNak ' '(or subclass thereof).' 
) request.pack() plaintext = request.packed # Convert the plaintext to a proper payload and create a garq from it payload = self._encrypt_asym(recipient, plaintext) del plaintext garq = GARQ( recipient = recipient.ghid, payload = payload ) # Pack 'er up and generate a MAC for it garq.pack(cipher=self.ciphersuite, address_algo=self.address_algo) garq.pack_signature( self._mac( key = self._derive_shared(recipient), data = garq.ghid.address ) ) return garq @classmethod def receive_container(cls, author, secret, container): if not isinstance(container, GEOC): raise TypeError( 'Container must be an unpacked GEOC, for example, as returned ' 'from unpack_container.' ) cls._typecheck_2ndparty(author) signature = container.signature cls._verify(author, signature, container.ghid.address) plaintext = cls._decrypt(secret, container.payload) # This will need to be converted into a namedtuple or something return plaintext @classmethod def receive_bind_static(cls, binder, binding): if not isinstance(binding, GOBS): raise TypeError( 'Binding must be an unpacked GOBS, for example, as returned ' 'from unpack_bind_static.' ) cls._typecheck_2ndparty(binder) signature = binding.signature cls._verify(binder, signature, binding.ghid.address) # This will need to be converted into a namedtuple or something return binding.target @classmethod def receive_bind_dynamic(cls, binder, binding): if not isinstance(binding, GOBD): raise TypeError( 'Binding must be an unpacked GOBD, for example, as returned ' 'from unpack_bind_dynamic.' ) cls._typecheck_2ndparty(binder) signature = binding.signature cls._verify(binder, signature, binding.ghid.address) # This will need to be converted into a namedtuple or something return binding.target @classmethod def receive_debind(cls, debinder, debinding): if not isinstance(debinding, GDXX): raise TypeError( 'Debinding must be an unpacked GDXX, for example, as returned ' 'from unpack_debind.' ) cls._typecheck_2ndparty(debinder) signature = debinding.signature cls._verify(debinder, signature, debinding.ghid.address) # This will need to be converted into a namedtuple or something return debinding.target def unpack_request(self, packed): garq = GARQ.unpack(packed) plaintext = self._decrypt_asym(garq.payload) # Could do this with a loop, but it gets awkward when trying to # assign stuff to the resulting object. try: unpacked = GARQHandshake.unpack(plaintext) request = AsymHandshake( author = unpacked.author, target = unpacked.target, secret = unpacked.secret ) except ParseError: try: unpacked = GARQAck.unpack(plaintext) request = AsymAck( author = unpacked.author, target = unpacked.target, status = unpacked.status ) except ParseError: try: unpacked = GARQNak.unpack(plaintext) request = AsymNak( author = unpacked.author, target = unpacked.target, status = unpacked.status ) except ParseError: raise SecurityError('Could not securely unpack request.') garq._plaintext = request garq._author = request.author return garq def receive_request(self, requestor, request): ''' Verifies the request and exposes its contents. ''' # Typecheck all the things self._typecheck_2ndparty(requestor) # Also make sure the request is something we've already unpacked if not isinstance(request, GARQ): raise TypeError( 'Request must be an unpacked GARQ, as returned from ' 'unpack_request.' ) try: plaintext = request._plaintext except AttributeError as e: raise TypeError( 'Request must be an unpacked GARQ, as returned from ' 'unpack_request.' 
) from e self._verify_mac( key = self._derive_shared(requestor), data = request.ghid.address, mac = request.signature ) return plaintext @classmethod @abc.abstractmethod def _generate_second_party(cls, keys, address_algo): ''' MUST ONLY be called when generating one from scratch, not when loading one. Loading must always be done directly through loading a SecondParty. ''' pass @abc.abstractmethod def _generate_keys(self): ''' Create a set of keys for use in the identity. Must return a mapping of keys with the following values: { 'signature': <signature key>, 'encryption': <encryption key>, 'exchange': <exchange key> } In a form that is usable by the rest of the FirstParty crypto functions (this is dependent on the individual class' implementation, ex its crypto library). ''' pass @classmethod @abc.abstractmethod def new_secret(cls, *args, **kwargs): ''' Placeholder method to create new symmetric secret. Returns a Secret(). ''' return Secret(cipher=cls._ciphersuite, *args, **kwargs) @abc.abstractmethod def _sign(self, data): ''' Placeholder signing method. ''' pass @abc.abstractmethod def _verify(self, public, signature, data): ''' Verifies signature against data using SecondParty public. raises SecurityError if verification fails. returns True on success. ''' pass @abc.abstractmethod def _encrypt_asym(self, public, data): ''' Placeholder asymmetric encryptor. ''' pass @abc.abstractmethod def _decrypt_asym(self, data): ''' Placeholder asymmetric decryptor. ''' pass @classmethod @abc.abstractmethod def _decrypt(cls, secret, data): ''' Placeholder symmetric decryptor. ''' pass @classmethod @abc.abstractmethod def _encrypt(cls, secret, data): ''' Placeholder symmetric encryptor. ''' pass @abc.abstractmethod def _derive_shared(self, partner): ''' Derive a shared secret (not necessarily a Secret!) with the partner. ''' pass @classmethod @abc.abstractmethod def _mac(cls, key, data): ''' Generate a MAC for data using key. ''' pass @classmethod @abc.abstractmethod def _verify_mac(cls, key, mac, data): ''' Generate a MAC for data using key. ''' pass @abc.abstractmethod def _serialize(self): ''' Convert private keys into a standardized format. Don't save, just return a dictionary with bytes objects: { 'ghid': self.ghid, 'signature': self._signature_key, 'encryption': self._encryption_key, 'exchange': self._exchange_key } (etc) ''' pass @classmethod @abc.abstractmethod def _from_serialized(cls, serialization): ''' Create an instance of the class from a dictionary as created by cls._serialize. ''' pass class _ThirdPartyBase(_ObjectHandlerBase, metaclass=abc.ABCMeta): ''' Subclass this (on a per-ciphersuite basis) for servers, and other parties that have no access to privileged information. They can only verify. ''' @property def ciphersuite(self): return self._ciphersuite @classmethod def _dispatch_address(cls, address_algo): if address_algo == 'default': address_algo = cls.DEFAULT_ADDRESS_ALGO elif address_algo not in ADDRESS_ALGOS: raise ValueError( 'Address algorithm unavailable for use: ' + str(address_algo) ) return address_algo @staticmethod def unpack_object(packed): ''' Unpacks any Golix object. ''' success = False for golix_format in (GIDC, GEOC, GOBS, GOBD, GDXX, GARQ): try: obj = golix_format.unpack(packed) success = True # Hm, don't really like this. except (ParseError, TypeError): pass if not success: raise ParseError( 'Packed data does not appear to be a Golix object.' ) return obj @classmethod def unpack_request(cls, packed): ''' Unpack public everything from a request. 
(Cannot verify, at least for the existing ciphersuites, as of 2016-03). ''' garq = GARQ.unpack(packed) return garq @classmethod def verify_object(cls, second_party, obj): ''' Verifies the signature of any symmetric object (aka everything except GARQ) against data. raises TypeError if obj is an asymmetric object (or otherwise unsupported). raises SecurityError if verification fails. returns True on success. ''' if isinstance(obj, GEOC) or \ isinstance(obj, GOBS) or \ isinstance(obj, GOBD) or \ isinstance(obj, GDXX): return cls._verify( public = second_party, signature = obj.signature, data = obj.ghid.address ) elif isinstance(obj, GARQ): raise ValueError( 'Asymmetric objects cannot be verified by third parties. ' 'They can only be verified by their recipients.' ) elif isinstance(obj, GIDC): raise ValueError( 'Identity containers are inherently un-verified.' ) else: raise TypeError('Obj must be a Golix object: GIDC, GEOC, etc.') @classmethod @abc.abstractmethod def _verify(cls, public, signature, data): ''' Verifies signature against data using SecondParty public. raises SecurityError if verification fails. returns True on success. ''' pass class SecondParty0(_SecondPartyBase, _IdentityBase): _ciphersuite = 0 @classmethod def _pack_keys(cls, keys): return keys @classmethod def _unpack_keys(cls, keys): return keys class FirstParty0(_FirstPartyBase, _IdentityBase): ''' FOR TESTING PURPOSES ONLY. Entirely inoperative. Correct API, but ignores all input, creating only a symbolic output. NOTE THAT INHERITANCE ORDER MATTERS! Must be first a FirstParty, and second an Identity. ''' _ciphersuite = 0 _2PID = SecondParty0 # Well it's not exactly repeating yourself, though it does mean there # are sorta two ways to perform decryption. Best practice = always decrypt # using the author's SecondParty @classmethod def _generate_second_party(cls, keys, address_algo): keys = {} keys['signature'] = _dummy_pubkey keys['encryption'] = _dummy_pubkey keys['exchange'] = _dummy_pubkey_exchange return cls._2PID.from_keys(keys, address_algo) def _generate_keys(self): keys = {} keys['signature'] = _dummy_pubkey keys['encryption'] = _dummy_pubkey keys['exchange'] = _dummy_pubkey_exchange return keys def _serialize(self): return { 'ghid': bytes(self.ghid), 'signature': self._signature_key, 'encryption': self._encryption_key, 'exchange': self._exchange_key } @classmethod def _from_serialized(cls, serialization): try: ghid = Ghid.from_bytes(serialization['ghid']) keys = { 'signature': serialization['signature'], 'encryption': serialization['encryption'], 'exchange': serialization['exchange'] } except (TypeError, KeyError) as e: raise TypeError( 'serialization must be compatible with _serialize.' ) from e return cls(keys=keys, ghid=ghid) @classmethod def new_secret(cls): ''' Placeholder method to create new symmetric secret. ''' return super().new_secret(key=bytes(32), seed=None) def _sign(self, data): ''' Placeholder signing method. Data must be bytes-like. Private key should be a dictionary formatted with all necessary components for a private key (?). ''' return _dummy_signature @classmethod def _verify(cls, public, signature, data): ''' Verifies an author's signature against bites. Errors out if unsuccessful. Returns True if successful. Data must be bytes-like. public_key should be a dictionary formatted with all necessary components for a public key (?). Signature must be bytes-like. ''' cls._typecheck_2ndparty(public) return True def _encrypt_asym(self, public, data): ''' Placeholder asymmetric encryptor. 
Data should be bytes-like. Public key should be a dictionary formatted with all necessary components for a public key. ''' self._typecheck_2ndparty(public) return _dummy_asym def _decrypt_asym(self, data): ''' Placeholder asymmetric decryptor. Maybe add kwarguments do define what kind of internal object is returned? That would be smart. Or, even better, do an arbitrary object content, and then encode what class of internal object to use there. That way, it's not possible to accidentally encode secrets publicly, but you can also emulate behavior of normal exchange. Data should be bytes-like. Public key should be a dictionary formatted with all necessary components for a public key. ''' # Note that this will error out when trying to load components, # since it's 100% an invalid declaration of internal content. # But, it's a good starting point. return _dummy_asym @classmethod def _decrypt(cls, secret, data): ''' Placeholder symmetric decryptor. Data should be bytes-like. Key should be bytes-like. ''' return data @classmethod def _encrypt(cls, secret, data): ''' Placeholder symmetric encryptor. Data should be bytes-like. Key should be bytes-like. ''' return data def _derive_shared(self, partner): ''' Derive a shared secret with the partner. ''' self._typecheck_2ndparty(partner) return b'[[ Placeholder shared secret ]]' @classmethod def _mac(cls, key, data): ''' Generate a MAC for data using key. ''' return _dummy_mac @classmethod def _verify_mac(cls, key, mac, data): return True class ThirdParty0(_ThirdPartyBase): _ciphersuite = 0 # Note that, since this classmethod is from a different class, the # cls passed internally will be FirstParty0, NOT ThirdParty0. _verify = FirstParty0._verify class SecondParty1(_SecondPartyBase, _IdentityBase): _ciphersuite = 1 @classmethod def _pack_keys(cls, keys): packkeys = { 'signature': int.to_bytes( keys['signature'].public_numbers().n, length=512, byteorder='big'), 'encryption': int.to_bytes( keys['encryption'].public_numbers().n, length=512, byteorder='big'), 'exchange': keys['exchange'].public, } return packkeys @classmethod def _unpack_keys(cls, keys): n_sig = int.from_bytes(keys['signature'], byteorder='big') n_enc = int.from_bytes(keys['encryption'], byteorder='big') nums_sig = rsa.RSAPublicNumbers(n=n_sig, e=65537) nums_enc = rsa.RSAPublicNumbers(n=n_enc, e=65537) unpackkeys = { 'signature': nums_sig.public_key(CRYPTO_BACKEND), 'encryption': nums_enc.public_key(CRYPTO_BACKEND), 'exchange': ECDHPublic(bytes(keys['exchange'])), } return unpackkeys # RSA-PSS Signature salt length. # Put these here because explicit is better than implicit! _PSS_SALT_LENGTH = hashes.SHA512.digest_size class FirstParty1(_FirstPartyBase, _IdentityBase): ''' ... Hmmm ''' _ciphersuite = 1 _2PID = SecondParty1 # Well it's not exactly repeating yourself, though it does mean there # are sorta two ways to perform decryption. 
Best practice = always decrypt # using the author's SecondParty @classmethod def _generate_second_party(cls, keys, address_algo): pubkeys = { 'signature': keys['signature'].public_key(), 'encryption': keys['encryption'].public_key(), 'exchange': keys['exchange'].get_public() } del keys return cls._2PID.from_keys(keys=pubkeys, address_algo=address_algo) @classmethod def _generate_keys(cls): keys = {} keys['signature'] = rsa.generate_private_key( public_exponent = 65537, key_size = 4096, backend = CRYPTO_BACKEND ) keys['encryption'] = rsa.generate_private_key( public_exponent = 65537, key_size = 4096, backend = CRYPTO_BACKEND ) keys['exchange'] = ECDHPrivate() return keys def _serialize(self): return { 'ghid': bytes(self.ghid), 'signature': self._signature_key.private_bytes( encoding = serialization.Encoding.DER, format = serialization.PrivateFormat.PKCS8, encryption_algorithm = serialization.NoEncryption() ), 'encryption': self._encryption_key.private_bytes( encoding = serialization.Encoding.DER, format = serialization.PrivateFormat.PKCS8, encryption_algorithm = serialization.NoEncryption() ), 'exchange': bytes(self._exchange_key.private) } @classmethod def _from_serialized(cls, condensed): try: ghid = Ghid.from_bytes(condensed['ghid']) keys = { 'signature': serialization.load_der_private_key( data = condensed['signature'], password = None, backend = CRYPTO_BACKEND ), 'encryption': serialization.load_der_private_key( data = condensed['encryption'], password = None, backend = CRYPTO_BACKEND ), 'exchange': ECDHPrivate.load(condensed['exchange']) } except (TypeError, KeyError) as e: raise TypeError( 'serialization must be compatible with _serialize.' ) from e return cls(keys=keys, ghid=ghid) @classmethod def new_secret(cls): ''' Returns a new secure Secret(). ''' key = os.urandom(32) nonce = os.urandom(16) return super().new_secret(key=key, seed=nonce) @classmethod def _encrypt(cls, secret, data): ''' Symmetric encryptor. ''' # Could we do eg memoryview instead? if not isinstance(data, bytes): data = bytes(data) instance = ciphers.Cipher( ciphers.algorithms.AES(secret.key), ciphers.modes.CTR(secret.seed), backend = CRYPTO_BACKEND ) worker = instance.encryptor() return worker.update(data) + worker.finalize() @classmethod def _decrypt(cls, secret, data): ''' Symmetric decryptor. Handle multiple ciphersuites by having a SecondParty for whichever author created it, and calling their decrypt instead. ''' # Could we do eg memoryview instead? if not isinstance(data, bytes): data = bytes(data) instance = ciphers.Cipher( ciphers.algorithms.AES(secret.key), ciphers.modes.CTR(secret.seed), backend = CRYPTO_BACKEND ) worker = instance.decryptor() return worker.update(data) + worker.finalize() def _sign(self, data): ''' Signing method. ''' signer = self._signature_key.signer( padding.PSS( mgf = padding.MGF1(hashes.SHA512()), salt_length = _PSS_SALT_LENGTH ), hashes.SHA512() ) signer._hash_ctx = _NoopSHA512(data) return signer.finalize() # IT WOULD BE NICE TO BE ABLE TO USE THIS GRRRRRRRRRRRRRRRRRR signature = self._signature_key.sign( bytes(data), padding.PSS( mgf = padding.MGF1(hashes.SHA512()), salt_length = _PSS_SALT_LENGTH ), _NoopSHA512(data) ) return signature @classmethod def _verify(cls, public, signature, data): ''' Verifies an author's signature against bites. Errors out if unsuccessful. Returns True if successful. Data must be bytes-like. public_key should be a dictionary formatted with all necessary components for a public key (?). Signature must be bytes-like. 
''' cls._typecheck_2ndparty(public) try: verifier = public._signature_key.verifier( bytes(signature), padding.PSS( mgf = padding.MGF1(hashes.SHA512()), salt_length = _PSS_SALT_LENGTH ), hashes.SHA512() ) verifier._hash_ctx = _NoopSHA512(data) verifier.verify() # IT WOULD BE NICE TO BE ABLE TO USE THIS TOO!!!! grumble grumble # public._signature_key.verify( # bytes(signature), # bytes(data), # padding.PSS( # mgf = padding.MGF1(hashes.SHA512()), # salt_length = _PSS_SALT_LENGTH # ), # _NoopSHA512(data) # ) except InvalidSignature as exc: raise SecurityError('Failed to verify signature.') from exc return True def _encrypt_asym(self, public, data): ''' Placeholder asymmetric encryptor. Data should be bytes-like. Public key should be a dictionary formatted with all necessary components for a public key. ''' self._typecheck_2ndparty(public) ciphertext = public._encryption_key.encrypt( bytes(data), padding.OAEP( mgf = padding.MGF1(algorithm=hashes.SHA512()), algorithm = hashes.SHA512(), label = b'' ) ) return ciphertext def _decrypt_asym(self, data): ''' Placeholder asymmetric decryptor. ''' plaintext = self._encryption_key.decrypt( bytes(data), padding.OAEP( mgf = padding.MGF1(algorithm=hashes.SHA512()), algorithm = hashes.SHA512(), label = b'' ) ) return plaintext def _derive_shared(self, partner): ''' Derive a shared secret with the partner. ''' # Call the donna25519 exchange method and return bytes ecdh = self._exchange_key.do_exchange(partner._exchange_key) # Get both of our addresses and then the bitwise XOR of them both my_hash = self.ghid.address their_hash = partner.ghid.address salt = bytes([a ^ b for a, b in zip(my_hash, their_hash)]) instance = hkdf.HKDF( algorithm = hashes.SHA512(), length = hashes.SHA512.digest_size, salt = salt, info = b'', backend = CRYPTO_BACKEND ) key = instance.derive(ecdh) # Might as well do this immediately, not that it really adds anything del ecdh, my_hash, their_hash, salt return key @classmethod def _mac(cls, key, data): ''' Generate a MAC for data using key. ''' h = hmac.HMAC( key, hashes.SHA512(), backend = CRYPTO_BACKEND ) h.update(data) return h.finalize() @classmethod def _verify_mac(cls, key, mac, data): ''' Verify an existing MAC. ''' if not isinstance(mac, bytes): mac = bytes(mac) if not isinstance(data, bytes): data = bytes(data) h = hmac.HMAC( key, hashes.SHA512(), backend = CRYPTO_BACKEND ) h.update(data) try: h.verify(mac) except InvalidSignature as exc: raise SecurityError('Failed to verify MAC.') from exc return True class ThirdParty1(_ThirdPartyBase): _ciphersuite = 1 # Note that, since this classmethod is from a different class, the # cls passed internally will be FirstParty0, NOT ThirdParty0. _verify = FirstParty1._verify
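# Illustrative usage sketch (not part of the library surface): a ciphersuite-1
# round trip using only the calls defined above. FirstParty1() generates fresh
# RSA-4096 and X25519 keys (slow), new_secret() mints an AES-256-CTR key/nonce
# pair, and ThirdParty1 verifies the container signature from the author's
# public SecondParty1 alone. The function name and plaintext are placeholders.
def _ciphersuite1_roundtrip_sketch(plaintext=b'hello golix'):
    author = FirstParty1()
    secret = FirstParty1.new_secret()
    # Wrap the plaintext into a signed, symmetrically encrypted GEOC container.
    geoc = author.make_container(secret, plaintext)
    # A server (third party) needs only the public identity to verify.
    ThirdParty1.verify_object(second_party=author.second_party, obj=geoc)
    # A recipient who also holds the secret recovers the payload.
    recovered = FirstParty1.receive_container(author.second_party, secret, geoc)
    return geoc, recovered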
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """I totally stole most of this from melange, thx guys!!!""" import collections import inspect import os import shutil import time import uuid from eventlet.timeout import Timeout import jinja2 from oslo_concurrency import processutils from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import importutils from oslo_utils import strutils from passlib import pwd import six import six.moves.urllib.parse as urlparse from trove.common import cfg from trove.common import exception from trove.common.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) import_class = importutils.import_class import_object = importutils.import_object import_module = importutils.import_module bool_from_string = strutils.bool_from_string execute = processutils.execute def build_jinja_environment(): env = jinja2.Environment( autoescape=True, loader=jinja2.ChoiceLoader([ jinja2.FileSystemLoader(CONF.template_path), jinja2.PackageLoader("trove", "templates") ])) # Add some basic operation not built-in. env.globals['max'] = max env.globals['min'] = min return env ENV = build_jinja_environment() def pagination_limit(limit, default_limit): limit = int(limit or default_limit) return min(limit, default_limit) def create_method_args_string(*args, **kwargs): """Returns a string representation of args and keyword args. I.e. for args=1,2,3 and kwargs={'a':4, 'b':5} you'd get: "1,2,3,a=4,b=5" """ # While %s turns a var into a string but in some rare cases explicit # repr() is less likely to raise an exception. arg_strs = [repr(arg) for arg in args] arg_strs += ['%s=%s' % (repr(key), repr(value)) for (key, value) in kwargs.items()] return ', '.join(arg_strs) def stringify_keys(dictionary): if dictionary is None: return None return {str(key): value for key, value in dictionary.items()} def exclude(key_values, *exclude_keys): if key_values is None: return None return {key: value for key, value in key_values.items() if key not in exclude_keys} def generate_uuid(): return str(uuid.uuid4()) def raise_if_process_errored(process, exception): try: err = process.stderr.read() if err: raise exception(err) except OSError: pass def clean_out(folder): for root, dirs, files in os.walk(folder): for f in files: os.unlink(os.path.join(root, f)) for d in dirs: shutil.rmtree(os.path.join(root, d)) class cached_property(object): """A decorator that converts a function into a lazy property. 
Taken from : https://github.com/nshah/python-memoize The function wrapped is called the first time to retrieve the result and then that calculated result is used the next time you access the value: class Foo(object): @cached_property def bar(self): # calculate something important here return 42 """ def __init__(self, func, name=None, doc=None): self.func = func self.__name__ = name or func.__name__ self.__doc__ = doc or func.__doc__ def __get__(self, obj, owner): if obj is None: return self value = self.func(obj) setattr(obj, self.__name__, value) return value class MethodInspector(object): def __init__(self, func): self._func = func @cached_property def required_args(self): return self.args[0:self.required_args_count] @cached_property def optional_args(self): keys = self.args[self.required_args_count: len(self.args)] return zip(keys, self.defaults) @cached_property def defaults(self): return self.argspec.defaults or () @cached_property def required_args_count(self): return len(self.args) - len(self.defaults) @cached_property def args(self): args = self.argspec.args if inspect.ismethod(self._func): args.pop(0) return args @cached_property def argspec(self): return inspect.getargspec(self._func) def __str__(self): optionals = ["[{0}=<{0}>]".format(k) for k, v in self.optional_args] required = ["{0}=<{0}>".format(arg) for arg in self.required_args] args_str = ' '.join(required + optionals) return "%s %s" % (self._func.__name__, args_str) def build_polling_task(retriever, condition=lambda value: value, sleep_time=1, time_out=None): start_time = time.time() def poll_and_check(): obj = retriever() if condition(obj): raise loopingcall.LoopingCallDone(retvalue=obj) if time_out is not None and time.time() - start_time > time_out: raise exception.PollTimeOut return loopingcall.BackOffLoopingCall( f=poll_and_check).start(initial_delay=False, starting_interval=sleep_time, max_interval=30, timeout=time_out) def poll_until(retriever, condition=lambda value: value, sleep_time=1, time_out=None): """Retrieves object until it passes condition, then returns it. If time_out_limit is passed in, PollTimeOut will be raised once that amount of time is eclipsed. """ return build_polling_task(retriever, condition=condition, sleep_time=sleep_time, time_out=time_out).wait() # Copied from nova.api.openstack.common in the old code. def get_id_from_href(href): """Return the id or uuid portion of a url. Given: 'http://www.foo.com/bar/123?q=4' Returns: '123' Given: 'http://www.foo.com/bar/abc123?q=4' Returns: 'abc123' """ return urlparse.urlsplit("%s" % href).path.split('/')[-1] def execute_with_timeout(*args, **kwargs): time = kwargs.pop('timeout', CONF.command_process_timeout) log_output_on_error = kwargs.pop('log_output_on_error', False) timeout = Timeout(time) try: return execute(*args, **kwargs) except exception.ProcessExecutionError as e: if log_output_on_error: LOG.error( _("Command '%(cmd)s' failed. 
%(description)s " "Exit code: %(exit_code)s\nstderr: %(stderr)s\n" "stdout: %(stdout)s"), {'cmd': e.cmd, 'description': e.description or '', 'exit_code': e.exit_code, 'stderr': e.stderr, 'stdout': e.stdout}) raise except Timeout as t: if t is not timeout: LOG.error(_("Got a timeout but not the one expected.")) raise else: msg = (_("Time out after waiting " "%(time)s seconds when running proc: %(args)s" " %(kwargs)s.") % {'time': time, 'args': args, 'kwargs': kwargs}) LOG.error(msg) raise exception.ProcessExecutionError(msg) finally: timeout.cancel() def correct_id_with_req(id, request): # Due to a shortcoming with the way Trove uses routes.mapper, # URL entities right of the last slash that contain at least # one . are routed to our service without that suffix, as # it was interpreted as a filetype This method looks at the # request, and if applicable, reattaches the suffix to the id. routing_args = request.environ.get('wsgiorg.routing_args', []) for routing_arg in routing_args: try: found = routing_arg.get('format', '') if found and found not in CONF.expected_filetype_suffixes: return "%s.%s" % (id, found) except (AttributeError, KeyError): # Not the relevant routing_args entry. pass return id def generate_random_password(password_length=None): password_length = ( password_length or cfg.get_configuration_property('default_password_length') ) return pwd.genword(length=password_length) def try_recover(func): def _decorator(*args, **kwargs): recover_func = kwargs.pop("recover_func", None) try: func(*args, **kwargs) except Exception: if recover_func is not None: recover_func(func) else: LOG.debug("No recovery method defined for %(func)s", { 'func': func.__name__}) raise return _decorator def unpack_singleton(container): """Unpack singleton collections. Check whether a given collection is a singleton (has exactly one element) and unpack it if that is the case. Return the original collection otherwise. """ if is_collection(container) and len(container) == 1: return unpack_singleton(container[0]) return container def is_collection(item): """Return True is a given item is an iterable collection, but not a string. """ return (isinstance(item, collections.Iterable) and not isinstance(item, (bytes, six.text_type))) def format_output(message, format_len=79, truncate_len=None, replace_index=0): """Recursive function to try and keep line lengths below a certain amount, so they can be displayed nicely on the command-line or UI. Tries replacement patterns one at a time (in round-robin fashion) that insert \n at strategic spots. """ replacements = [['. ', '.\n'], [' (', '\n('], [': ', ':\n ']] replace_index %= len(replacements) if not isinstance(message, list): message = message.splitlines(1) msg_list = [] for line in message: if len(line) > format_len: ok_to_split_again = False for count in range(0, len(replacements)): lines = line.replace( replacements[replace_index][0], replacements[replace_index][1], 1 ).splitlines(1) replace_index = (replace_index + 1) % len(replacements) if len(lines) > 1: ok_to_split_again = True break for item in lines: # If we spilt, but a line is still too long, do it again if ok_to_split_again and len(item) > format_len: item = format_output(item, format_len=format_len, replace_index=replace_index) msg_list.append(item) else: msg_list.append(line) msg_str = "".join(msg_list) if truncate_len and len(msg_str) > truncate_len: msg_str = msg_str[:truncate_len - 3] + '...' 
    return msg_str


def to_gb(bytes):
    """
    This was moved from dbaas.py so that it could be used more widely as a
    utility function. The tests corresponding to this were also moved out from
    test_dbaas.py to test_utils.py.
    """
    if bytes == 0:
        return 0.0
    size = bytes / 1024.0 ** 3
    # Make sure we don't return 0.0 if the size is greater than 0
    return max(round(size, 2), 0.01)


def to_mb(bytes):
    """
    This was moved from dbaas.py so that it could be used more widely as a
    utility function. The tests corresponding to this were also moved out from
    test_dbaas.py to test_utils.py.
    """
    if bytes == 0:
        return 0.0
    size = bytes / 1024.0 ** 2
    # Make sure we don't return 0.0 if the size is greater than 0
    return max(round(size, 2), 0.01)
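# Illustrative sketch (not part of this module): to_gb()/to_mb() above round to
# two decimals but never report a non-zero size as 0.0, e.g. to_gb(2 * 1024 ** 3)
# is 2.0 while to_gb(1) is clamped to 0.01. The helper below shows the intended
# shape of a poll_until() caller; get_status and the 'ACTIVE' value are
# hypothetical placeholders supplied by that caller.
def _wait_until_active_sketch(get_status, time_out=60):
    # Block until get_status() returns 'ACTIVE', re-polling with backoff, or
    # raise exception.PollTimeOut once time_out seconds have elapsed.
    return poll_until(
        get_status,
        condition=lambda status: status == 'ACTIVE',
        sleep_time=2,
        time_out=time_out)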
from .base import ( Submodule, UpdateProgress ) from .util import find_first_remote_branch from git.exc import InvalidGitRepositoryError import git import logging # typing ------------------------------------------------------------------- from typing import TYPE_CHECKING, Union from git.types import Commit_ish if TYPE_CHECKING: from git.repo import Repo from git.util import IterableList # ---------------------------------------------------------------------------- __all__ = ["RootModule", "RootUpdateProgress"] log = logging.getLogger('git.objects.submodule.root') log.addHandler(logging.NullHandler()) class RootUpdateProgress(UpdateProgress): """Utility class which adds more opcodes to the UpdateProgress""" REMOVE, PATHCHANGE, BRANCHCHANGE, URLCHANGE = [ 1 << x for x in range(UpdateProgress._num_op_codes, UpdateProgress._num_op_codes + 4)] _num_op_codes = UpdateProgress._num_op_codes + 4 __slots__ = () BEGIN = RootUpdateProgress.BEGIN END = RootUpdateProgress.END REMOVE = RootUpdateProgress.REMOVE BRANCHCHANGE = RootUpdateProgress.BRANCHCHANGE URLCHANGE = RootUpdateProgress.URLCHANGE PATHCHANGE = RootUpdateProgress.PATHCHANGE class RootModule(Submodule): """A (virtual) Root of all submodules in the given repository. It can be used to more easily traverse all submodules of the master repository""" __slots__ = () k_root_name = '__ROOT__' def __init__(self, repo: 'Repo'): # repo, binsha, mode=None, path=None, name = None, parent_commit=None, url=None, ref=None) super(RootModule, self).__init__( repo, binsha=self.NULL_BIN_SHA, mode=self.k_default_mode, path='', name=self.k_root_name, parent_commit=repo.head.commit, url='', branch_path=git.Head.to_full_path(self.k_head_default) ) def _clear_cache(self) -> None: """May not do anything""" pass #{ Interface def update(self, previous_commit: Union[Commit_ish, None] = None, # type: ignore[override] recursive: bool = True, force_remove: bool = False, init: bool = True, to_latest_revision: bool = False, progress: Union[None, 'RootUpdateProgress'] = None, dry_run: bool = False, force_reset: bool = False, keep_going: bool = False ) -> 'RootModule': """Update the submodules of this repository to the current HEAD commit. This method behaves smartly by determining changes of the path of a submodules repository, next to changes to the to-be-checked-out commit or the branch to be checked out. This works if the submodules ID does not change. Additionally it will detect addition and removal of submodules, which will be handled gracefully. :param previous_commit: If set to a commit'ish, the commit we should use as the previous commit the HEAD pointed to before it was set to the commit it points to now. If None, it defaults to HEAD@{1} otherwise :param recursive: if True, the children of submodules will be updated as well using the same technique :param force_remove: If submodules have been deleted, they will be forcibly removed. Otherwise the update may fail if a submodule's repository cannot be deleted as changes have been made to it (see Submodule.update() for more information) :param init: If we encounter a new module which would need to be initialized, then do it. :param to_latest_revision: If True, instead of checking out the revision pointed to by this submodule's sha, the checked out tracking branch will be merged with the latest remote branch fetched from the repository's origin. Unless force_reset is specified, a local tracking branch will never be reset into its past, therefore the remote branch must be in the future for this to have an effect. 
:param force_reset: if True, submodules may checkout or reset their branch even if the repository has pending changes that would be overwritten, or if the local tracking branch is in the future of the remote tracking branch and would be reset into its past. :param progress: RootUpdateProgress instance or None if no progress should be sent :param dry_run: if True, operations will not actually be performed. Progress messages will change accordingly to indicate the WOULD DO state of the operation. :param keep_going: if True, we will ignore but log all errors, and keep going recursively. Unless dry_run is set as well, keep_going could cause subsequent/inherited errors you wouldn't see otherwise. In conjunction with dry_run, it can be useful to anticipate all errors when updating submodules :return: self""" if self.repo.bare: raise InvalidGitRepositoryError("Cannot update submodules in bare repositories") # END handle bare if progress is None: progress = RootUpdateProgress() # END assure progress is set prefix = '' if dry_run: prefix = 'DRY-RUN: ' repo = self.repo try: # SETUP BASE COMMIT ################### cur_commit = repo.head.commit if previous_commit is None: try: previous_commit = repo.commit(repo.head.log_entry(-1).oldhexsha) if previous_commit.binsha == previous_commit.NULL_BIN_SHA: raise IndexError # END handle initial commit except IndexError: # in new repositories, there is no previous commit previous_commit = cur_commit # END exception handling else: previous_commit = repo.commit(previous_commit) # obtain commit object # END handle previous commit psms: 'IterableList[Submodule]' = self.list_items(repo, parent_commit=previous_commit) sms: 'IterableList[Submodule]' = self.list_items(repo) spsms = set(psms) ssms = set(sms) # HANDLE REMOVALS ################### rrsm = (spsms - ssms) len_rrsm = len(rrsm) for i, rsm in enumerate(rrsm): op = REMOVE if i == 0: op |= BEGIN # END handle begin # fake it into thinking its at the current commit to allow deletion # of previous module. 
Trigger the cache to be updated before that progress.update(op, i, len_rrsm, prefix + "Removing submodule %r at %s" % (rsm.name, rsm.abspath)) rsm._parent_commit = repo.head.commit rsm.remove(configuration=False, module=True, force=force_remove, dry_run=dry_run) if i == len_rrsm - 1: op |= END # END handle end progress.update(op, i, len_rrsm, prefix + "Done removing submodule %r" % rsm.name) # END for each removed submodule # HANDLE PATH RENAMES ##################### # url changes + branch changes csms = (spsms & ssms) len_csms = len(csms) for i, csm in enumerate(csms): psm: 'Submodule' = psms[csm.name] sm: 'Submodule' = sms[csm.name] # PATH CHANGES ############## if sm.path != psm.path and psm.module_exists(): progress.update(BEGIN | PATHCHANGE, i, len_csms, prefix + "Moving repository of submodule %r from %s to %s" % (sm.name, psm.abspath, sm.abspath)) # move the module to the new path if not dry_run: psm.move(sm.path, module=True, configuration=False) # END handle dry_run progress.update( END | PATHCHANGE, i, len_csms, prefix + "Done moving repository of submodule %r" % sm.name) # END handle path changes if sm.module_exists(): # HANDLE URL CHANGE ################### if sm.url != psm.url: # Add the new remote, remove the old one # This way, if the url just changes, the commits will not # have to be re-retrieved nn = '__new_origin__' smm = sm.module() rmts = smm.remotes # don't do anything if we already have the url we search in place if len([r for r in rmts if r.url == sm.url]) == 0: progress.update(BEGIN | URLCHANGE, i, len_csms, prefix + "Changing url of submodule %r from %s to %s" % (sm.name, psm.url, sm.url)) if not dry_run: assert nn not in [r.name for r in rmts] smr = smm.create_remote(nn, sm.url) smr.fetch(progress=progress) # If we have a tracking branch, it should be available # in the new remote as well. if len([r for r in smr.refs if r.remote_head == sm.branch_name]) == 0: raise ValueError( "Submodule branch named %r was not available in new submodule remote at %r" % (sm.branch_name, sm.url) ) # END head is not detached # now delete the changed one rmt_for_deletion = None for remote in rmts: if remote.url == psm.url: rmt_for_deletion = remote break # END if urls match # END for each remote # if we didn't find a matching remote, but have exactly one, # we can safely use this one if rmt_for_deletion is None: if len(rmts) == 1: rmt_for_deletion = rmts[0] else: # if we have not found any remote with the original url # we may not have a name. This is a special case, # and its okay to fail here # Alternatively we could just generate a unique name and leave all # existing ones in place raise InvalidGitRepositoryError( "Couldn't find original remote-repo at url %r" % psm.url) # END handle one single remote # END handle check we found a remote orig_name = rmt_for_deletion.name smm.delete_remote(rmt_for_deletion) # NOTE: Currently we leave tags from the deleted remotes # as well as separate tracking branches in the possibly totally # changed repository ( someone could have changed the url to # another project ). At some point, one might want to clean # it up, but the danger is high to remove stuff the user # has added explicitly # rename the new remote back to what it was smr.rename(orig_name) # early on, we verified that the our current tracking branch # exists in the remote. Now we have to assure that the # sha we point to is still contained in the new remote # tracking branch. 
smsha = sm.binsha found = False rref = smr.refs[self.branch_name] for c in rref.commit.traverse(): if c.binsha == smsha: found = True break # END traverse all commits in search for sha # END for each commit if not found: # adjust our internal binsha to use the one of the remote # this way, it will be checked out in the next step # This will change the submodule relative to us, so # the user will be able to commit the change easily log.warning("Current sha %s was not contained in the tracking\ branch at the new remote, setting it the the remote's tracking branch", sm.hexsha) sm.binsha = rref.commit.binsha # END reset binsha # NOTE: All checkout is performed by the base implementation of update # END handle dry_run progress.update( END | URLCHANGE, i, len_csms, prefix + "Done adjusting url of submodule %r" % (sm.name)) # END skip remote handling if new url already exists in module # END handle url # HANDLE PATH CHANGES ##################### if sm.branch_path != psm.branch_path: # finally, create a new tracking branch which tracks the # new remote branch progress.update(BEGIN | BRANCHCHANGE, i, len_csms, prefix + "Changing branch of submodule %r from %s to %s" % (sm.name, psm.branch_path, sm.branch_path)) if not dry_run: smm = sm.module() smmr = smm.remotes # As the branch might not exist yet, we will have to fetch all remotes to be sure ... . for remote in smmr: remote.fetch(progress=progress) # end for each remote try: tbr = git.Head.create(smm, sm.branch_name, logmsg='branch: Created from HEAD') except OSError: # ... or reuse the existing one tbr = git.Head(smm, sm.branch_path) # END assure tracking branch exists tbr.set_tracking_branch(find_first_remote_branch(smmr, sm.branch_name)) # NOTE: All head-resetting is done in the base implementation of update # but we will have to checkout the new branch here. As it still points to the currently # checkout out commit, we don't do any harm. # As we don't want to update working-tree or index, changing the ref is all there is to do smm.head.reference = tbr # END handle dry_run progress.update( END | BRANCHCHANGE, i, len_csms, prefix + "Done changing branch of submodule %r" % sm.name) # END handle branch # END handle # END for each common submodule except Exception as err: if not keep_going: raise log.error(str(err)) # end handle keep_going # FINALLY UPDATE ALL ACTUAL SUBMODULES ###################################### for sm in sms: # update the submodule using the default method sm.update(recursive=False, init=init, to_latest_revision=to_latest_revision, progress=progress, dry_run=dry_run, force=force_reset, keep_going=keep_going) # update recursively depth first - question is which inconsitent # state will be better in case it fails somewhere. Defective branch # or defective depth. The RootSubmodule type will never process itself, # which was done in the previous expression if recursive: # the module would exist by now if we are not in dry_run mode if sm.module_exists(): type(self)(sm.module()).update(recursive=True, force_remove=force_remove, init=init, to_latest_revision=to_latest_revision, progress=progress, dry_run=dry_run, force_reset=force_reset, keep_going=keep_going) # END handle dry_run # END handle recursive # END for each submodule to update return self def module(self) -> 'Repo': """:return: the actual repository containing the submodules""" return self.repo #} END interface #} END classes
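# Illustrative usage sketch (not part of the module): drive RootModule.update()
# in dry-run mode against a superproject checkout. '/path/to/superproject' is a
# placeholder path; with dry_run=True the call only reports the operations that
# WOULD be performed and changes nothing.
def _preview_submodule_sync(repo_path='/path/to/superproject'):
    from git import Repo  # GitPython's top-level repository class
    root = RootModule(Repo(repo_path))
    return root.update(
        recursive=True,             # also handle nested submodules
        to_latest_revision=False,   # keep the SHAs recorded by the superproject
        dry_run=True,               # report changes without applying them
        keep_going=True)            # log per-submodule errors and continue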
#!/usr/bin/env python import time import numpy as np from selfdrive.config import Conversions as CV from selfdrive.car.honda.carstate import CarState from selfdrive.car.honda.carcontroller import CarController, AH from selfdrive.boardd.boardd import can_capnp_to_can_list from cereal import car import zmq from selfdrive.services import service_list import selfdrive.messaging as messaging # Car button codes class CruiseButtons: RES_ACCEL = 4 DECEL_SET = 3 CANCEL = 2 MAIN = 1 #car chimes: enumeration from dbc file. Chimes are for alerts and warnings class CM: MUTE = 0 SINGLE = 3 DOUBLE = 4 REPEATED = 1 CONTINUOUS = 2 #car beepss: enumeration from dbc file. Beeps are for activ and deactiv class BP: MUTE = 0 SINGLE = 3 TRIPLE = 2 REPEATED = 1 class CarInterface(object): def __init__(self, CP, logcan, sendcan=None): self.logcan = logcan self.CP = CP self.frame = 0 self.can_invalid_count = 0 # *** init the major players *** self.CS = CarState(CP, self.logcan) # sending if read only is False if sendcan is not None: self.sendcan = sendcan self.CC = CarController() if self.CS.accord: self.accord_msg = [] # returns a car.CarState def update(self): # ******************* do can recv ******************* can_pub_main = [] canMonoTimes = [] for a in messaging.drain_sock(self.logcan): canMonoTimes.append(a.logMonoTime) can_pub_main.extend(can_capnp_to_can_list(a.can, [0,2])) if self.CS.accord: self.accord_msg.extend(can_capnp_to_can_list(a.can, [9])) self.accord_msg = self.accord_msg[-1:] self.CS.update(can_pub_main) # create message ret = car.CarState.new_message() # speeds ret.vEgo = self.CS.v_ego ret.wheelSpeeds.fl = self.CS.cp.vl[0x1D0]['WHEEL_SPEED_FL'] ret.wheelSpeeds.fr = self.CS.cp.vl[0x1D0]['WHEEL_SPEED_FR'] ret.wheelSpeeds.rl = self.CS.cp.vl[0x1D0]['WHEEL_SPEED_RL'] ret.wheelSpeeds.rr = self.CS.cp.vl[0x1D0]['WHEEL_SPEED_RR'] # gas pedal ret.gas = self.CS.car_gas / 256.0 if not self.CP.enableGas: ret.gasPressed = self.CS.pedal_gas > 0 else: ret.gasPressed = self.CS.user_gas_pressed # brake pedal ret.brake = self.CS.user_brake ret.brakePressed = self.CS.brake_pressed != 0 # steering wheel # TODO: units ret.steeringAngle = self.CS.angle_steers if self.CS.accord: # TODO: move this into the CAN parser ret.steeringTorque = 0 if len(self.accord_msg) > 0: aa = map(lambda x: ord(x)&0x7f, self.accord_msg[0][2]) if len(aa) != 5 or (-(aa[0]+aa[1]+aa[2]+aa[3]))&0x7f != aa[4]: print "ACCORD MSG BAD LEN OR CHECKSUM!" # TODO: throw an error here? 
else: st = ((aa[0]&0xF) << 5) + (aa[1]&0x1F) if st >= 256: st = -(512-st) ret.steeringTorque = st ret.steeringPressed = abs(ret.steeringTorque) > 20 else: ret.steeringTorque = self.CS.cp.vl[0x18F]['STEER_TORQUE_SENSOR'] ret.steeringPressed = self.CS.steer_override # cruise state ret.cruiseState.enabled = self.CS.pcm_acc_status != 0 ret.cruiseState.speed = self.CS.v_cruise_pcm * CV.KPH_TO_MS # TODO: button presses buttonEvents = [] if self.CS.left_blinker_on != self.CS.prev_left_blinker_on: be = car.CarState.ButtonEvent.new_message() be.type = 'leftBlinker' be.pressed = self.CS.left_blinker_on != 0 buttonEvents.append(be) if self.CS.right_blinker_on != self.CS.prev_right_blinker_on: be = car.CarState.ButtonEvent.new_message() be.type = 'rightBlinker' be.pressed = self.CS.right_blinker_on != 0 buttonEvents.append(be) if self.CS.cruise_buttons != self.CS.prev_cruise_buttons: be = car.CarState.ButtonEvent.new_message() be.type = 'unknown' if self.CS.cruise_buttons != 0: be.pressed = True but = self.CS.cruise_buttons else: be.pressed = False but = self.CS.prev_cruise_buttons if but == CruiseButtons.RES_ACCEL: be.type = 'accelCruise' elif but == CruiseButtons.DECEL_SET: be.type = 'decelCruise' elif but == CruiseButtons.CANCEL: be.type = 'cancel' elif but == CruiseButtons.MAIN: be.type = 'altButton3' buttonEvents.append(be) if self.CS.cruise_setting != self.CS.prev_cruise_setting: be = car.CarState.ButtonEvent.new_message() be.type = 'unknown' if self.CS.cruise_setting != 0: be.pressed = True but = self.CS.cruise_setting else: be.pressed = False but = self.CS.prev_cruise_setting if but == 1: be.type = 'altButton1' # TODO: more buttons? buttonEvents.append(be) ret.buttonEvents = buttonEvents # errors # TODO: I don't like the way capnp does enums # These strings aren't checked at compile time errors = [] if not self.CS.can_valid: self.can_invalid_count += 1 if self.can_invalid_count >= 5: errors.append('commIssue') else: self.can_invalid_count = 0 if self.CS.steer_error: errors.append('steerUnavailable') elif self.CS.steer_not_allowed: errors.append('steerTemporarilyUnavailable') if self.CS.brake_error: errors.append('brakeUnavailable') if not self.CS.gear_shifter_valid: errors.append('wrongGear') if not self.CS.door_all_closed: errors.append('doorOpen') if not self.CS.seatbelt: errors.append('seatbeltNotLatched') if self.CS.esp_disabled: errors.append('espDisabled') if not self.CS.main_on: errors.append('wrongCarMode') if self.CS.gear_shifter == 2: errors.append('reverseGear') ret.errors = errors ret.canMonoTimes = canMonoTimes # cast to reader so it can't be modified #print ret return ret.as_reader() # pass in a car.CarControl # to be called @ 100hz def apply(self, c): #print c if c.hudControl.speedVisible: hud_v_cruise = c.hudControl.setSpeed * CV.MS_TO_KPH else: hud_v_cruise = 255 hud_alert = { "none": AH.NONE, "fcw": AH.FCW, "steerRequired": AH.STEER, "brakePressed": AH.BRAKE_PRESSED, "wrongGear": AH.GEAR_NOT_D, "seatbeltUnbuckled": AH.SEATBELT, "speedTooHigh": AH.SPEED_TOO_HIGH}[str(c.hudControl.visualAlert)] snd_beep, snd_chime = { "none": (BP.MUTE, CM.MUTE), "beepSingle": (BP.SINGLE, CM.MUTE), "beepTriple": (BP.TRIPLE, CM.MUTE), "beepRepeated": (BP.REPEATED, CM.MUTE), "chimeSingle": (BP.MUTE, CM.SINGLE), "chimeDouble": (BP.MUTE, CM.DOUBLE), "chimeRepeated": (BP.MUTE, CM.REPEATED), "chimeContinuous": (BP.MUTE, CM.CONTINUOUS)}[str(c.hudControl.audibleAlert)] pcm_accel = int(np.clip(c.cruiseControl.accelOverride/1.4,0,1)*0xc6) self.CC.update(self.sendcan, c.enabled, self.CS, self.frame, \ 
               c.gas, c.brake, c.steeringTorque, \
               c.cruiseControl.speedOverride, \
               c.cruiseControl.override, \
               c.cruiseControl.cancel, \
               pcm_accel, \
               hud_v_cruise, c.hudControl.lanesVisible, \
               hud_show_car = c.hudControl.leadVisible, \
               hud_alert = hud_alert, \
               snd_beep = snd_beep, \
               snd_chime = snd_chime)

    self.frame += 1

    return not (c.enabled and not self.CC.controls_allowed)
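# --- Hedged illustration (not part of the interface above) ---
# Standalone restatement of the Accord steering-torque decode used in
# update(): bytes are masked to 7 bits, checked against a 7-bit additive
# checksum, and the torque is a 9-bit value split across two bytes and
# sign-extended. The helper name is ours; the interface does this inline.
def decode_accord_steer_torque(payload):
    aa = [ord(x) & 0x7f for x in payload]
    if len(aa) != 5 or (-(aa[0] + aa[1] + aa[2] + aa[3])) & 0x7f != aa[4]:
        return None  # bad length or checksum, mirroring the guard above
    st = ((aa[0] & 0xF) << 5) + (aa[1] & 0x1F)
    if st >= 256:  # sign-extend the 9-bit field
        st = -(512 - st)
    return st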
import os from gym import error, spaces from gym.utils import seeding import numpy as np from os import path import gym import six try: import mujoco_py from mujoco_py.mjlib import mjlib except ImportError as e: raise error.DependencyNotInstalled("{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(e)) class MujocoEnv(gym.Env): """Superclass for all MuJoCo environments. """ def __init__(self, model_path, frame_skip): if model_path.startswith("/"): fullpath = model_path else: fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path) if not path.exists(fullpath): raise IOError("File %s does not exist" % fullpath) self.frame_skip = frame_skip self.model = mujoco_py.MjModel(fullpath) self.data = self.model.data self.viewer = None # self.camera2 = None # #import pdb; pdb.set_trace() # self.camera2 = mujoco_py.MjViewer(init_width=500, init_height=500) # self.camera2.start() # self.camera2.set_model(self.model) # self.camera2_setup() self.metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': int(np.round(1.0 / self.dt)) } self.init_qpos = self.model.data.qpos.ravel().copy() self.init_qvel = self.model.data.qvel.ravel().copy() observation, _reward, done, _info = self._step(np.zeros(self.model.nu)) assert not done self.obs_dim = observation.size bounds = self.model.actuator_ctrlrange.copy() low = bounds[:, 0] high = bounds[:, 1] self.action_space = spaces.Box(low, high) high = np.inf*np.ones(self.obs_dim) low = -high self.observation_space = spaces.Box(low, high) self._seed() def _seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] # methods to override: # ---------------------------- def reset_model(self): """ Reset the robot degrees of freedom (qpos and qvel). Implement this in each subclass. """ raise NotImplementedError def viewer_setup(self): """ This method is called when the viewer is initialized and after every reset Optionally implement this method, if you need to tinker with camera position and so forth. 
""" pass # ----------------------------- def _reset(self): mjlib.mj_resetData(self.model.ptr, self.data.ptr) ob = self.reset_model() if self.viewer is not None: self.viewer.autoscale() self.viewer_setup() return ob def set_state(self, qpos, qvel): assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,) self.model.data.qpos = qpos self.model.data.qvel = qvel self.model._compute_subtree() # pylint: disable=W0212 # import pdb; pdb.set_trace() self.model.forward() @property def dt(self): return self.model.opt.timestep * self.frame_skip def do_simulation(self, ctrl, n_frames): self.model.data.ctrl = ctrl for _ in range(n_frames): self.model.step() def _render(self, mode='human', close=False): if close: if self.viewer is not None: self._get_viewer().finish() self.viewer = None return if mode == 'rgb_array': self._get_viewer().render() data, width, height = self._get_viewer().get_image() return np.fromstring(data, dtype='uint8').reshape(height, width, 3)[::-1, :, :] elif mode == 'human': self._get_viewer().loop_once() def _get_viewer(self): if self.viewer is None: self.viewer = mujoco_py.MjViewer() self.viewer.start() self.viewer.set_model(self.model) self.viewer_setup() return self.viewer def get_body_com(self, body_name): idx = self.model.body_names.index(six.b(body_name)) return self.model.data.com_subtree[idx] def get_body_comvel(self, body_name): idx = self.model.body_names.index(six.b(body_name)) return self.model.body_comvels[idx] def get_body_xmat(self, body_name): idx = self.model.body_names.index(six.b(body_name)) return self.model.data.xmat[idx].reshape((3, 3)) def state_vector(self): return np.concatenate([ self.model.data.qpos.flat, self.model.data.qvel.flat ]) class MujocoPixelEnv(MujocoEnv): def __init__( self, model_path, frame_skip, width=42, height=42, mode="rgb" ): if model_path.startswith("/"): fullpath = model_path else: fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path) if not path.exists(fullpath): raise IOError("File %s does not exist" % fullpath) self.frame_skip = frame_skip self.model = mujoco_py.MjModel(fullpath) self.data = self.model.data self.width = width self.height = height self.mode = mode self.viewer = None self.camera2 = None self.camera2 = mujoco_py.MjViewer(init_width=self.width, init_height=self.height) self.camera2.start() self.camera2.set_model(self.model) self.camera2_setup() self.metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': int(np.round(1.0 / self.dt)) } self.init_qpos = self.model.data.qpos.ravel().copy() self.init_qvel = self.model.data.qvel.ravel().copy() observation, _reward, done, _info = self._step(np.zeros(self.model.nu)) assert not done self.obs_dim = observation.size bounds = self.model.actuator_ctrlrange.copy() low = bounds[:, 0] high = bounds[:, 1] self.action_space = spaces.Box(low, high) high = np.inf*np.ones(self.obs_dim) low = -high self.observation_space = spaces.Box(low, high) self._seed() def camera2_setup(self): raise NotImplementedError def _get_obs(self): camera2_output = None self.camera2.render() data, width, height = self.camera2.get_image() camera2_output = np.fromstring(data, dtype='uint8').reshape(height, width, 3)[::-1, :, :] if self.mode == "grey": camera2_output = np.mean(camera2_output, axis=2)[:, :, np.newaxis] return camera2_output class MujocoPixel2CamEnv(MujocoEnv): def __init__( self, model_path, frame_skip, width=42, height=42, mode="rgb" ): if model_path.startswith("/"): fullpath = model_path else: fullpath = 
os.path.join(os.path.dirname(__file__), "assets", model_path) if not path.exists(fullpath): raise IOError("File %s does not exist" % fullpath) self.frame_skip = frame_skip self.model = mujoco_py.MjModel(fullpath) self.data = self.model.data self.width = width self.height = height self.mode = mode self.viewer = None self.camera2 = None self.camera2 = mujoco_py.MjViewer(init_width=self.width, init_height=self.height) self.camera2.start() self.camera2.set_model(self.model) self.camera2_setup() self.camera3 = None self.camera3 = mujoco_py.MjViewer(init_width=self.width, init_height=self.height) self.camera3.start() self.camera3.set_model(self.model) self.camera3_setup() azimuth = self.camera2.cam.azimuth self.camera3.cam.azimuth = azimuth + 180 self.metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': int(np.round(1.0 / self.dt)) } self.init_qpos = self.model.data.qpos.ravel().copy() self.init_qvel = self.model.data.qvel.ravel().copy() observation, _reward, done, _info = self._step(np.zeros(self.model.nu)) assert not done self.obs_dim = observation.size bounds = self.model.actuator_ctrlrange.copy() low = bounds[:, 0] high = bounds[:, 1] self.action_space = spaces.Box(low, high) high = np.inf*np.ones(self.obs_dim) low = -high self.observation_space = spaces.Box(low, high) self._seed() def camera2_setup(self): raise NotImplementedError def camera3_setup(self): raise NotImplementedError def _get_obs(self): camera2_output = None self.camera2.render() data, width, height = self.camera2.get_image() camera2_output = np.fromstring(data, dtype='uint8').reshape(height, width, 3)[::-1, :, :] if self.mode == "grey": camera2_output = np.mean(camera2_output, axis=2)[:, :, np.newaxis] camera3_output = None self.camera3.render() data, width, height = self.camera3.get_image() camera3_output = np.fromstring(data, dtype='uint8').reshape(height, width, 3)[::-1, :, :] if self.mode == "grey": camera3_output = np.mean(camera3_output, axis=2)[:, :, np.newaxis] return np.concatenate([camera2_output, camera3_output], axis=2)
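# --- Hedged sketch (not part of the module above) ---
# Minimal shape of a concrete environment built on MujocoEnv: a subclass
# supplies a model XML plus _step(), reset_model() and viewer_setup().
# The model file name and the constant reward are placeholders.
class ExamplePendulumEnv(MujocoEnv):
    def __init__(self):
        MujocoEnv.__init__(self, "inverted_pendulum.xml", frame_skip=2)

    def _step(self, action):
        self.do_simulation(action, self.frame_skip)
        ob = self.state_vector()
        notdone = np.isfinite(ob).all()
        return ob, 1.0, not notdone, {}

    def reset_model(self):
        self.set_state(self.init_qpos, self.init_qvel)
        return self.state_vector()

    def viewer_setup(self):
        self.viewer.cam.trackbodyid = 0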
import re from utils import * SEARCH_LIMIT = 10000 def get_gene(db, gene_id): return db.genes.find_one({'gene_id': gene_id}, projection={'_id': False}) def get_gene_by_name(db, gene_name): # try gene_name field first gene = db.genes.find_one({'gene_name': gene_name}, projection={'_id': False}) if gene: return gene # if not, try gene['other_names'] return db.genes.find_one({'other_names': gene_name}, projection={'_id': False}) def get_transcript(db, transcript_id): transcript = db.transcripts.find_one({'transcript_id': transcript_id}, projection={'_id': False}) if not transcript: return None transcript['exons'] = get_exons_in_transcript(db, transcript_id, sort=True) return transcript def get_raw_variant(db, source, xpos, ref, alt, get_id=False): if source == 'exac': exac_variant = db.exome_variants.find_one({'xpos': xpos, 'ref': ref, 'alt': alt}, projection={'_id': get_id}) return exac_variant if source == 'gnomad': gnomad_variant = db.genome_variants.find_one({'xpos': xpos, 'ref': ref, 'alt': alt}, projection={'_id': get_id}) return gnomad_variant def get_variant(db, source, xpos, ref, alt): variant = get_raw_variant(db, source, xpos, ref, alt, False) if variant is None or 'rsid' not in variant: return variant if variant['rsid'] == '.' or variant['rsid'] is None: rsid = db.dbsnp.find_one({'xpos': xpos}) if rsid: variant['rsid'] = 'rs%s' % rsid['rsid'] return variant def get_variants_by_rsid(db, rsid): if not rsid.startswith('rs'): return None try: int(rsid.lstrip('rs')) except Exception, e: return None exome_variants = list(db.exome_variants.find({'rsid': rsid}, projection={'_id': False})) for variant in exome_variants: variant['dataset'] = 'ExAC' genome_variants = list(db.genome_variants.find({'rsid': rsid}, projection={'_id': False})) for variant in genome_variants: variant['dataset'] = 'gnomAD' variants = exome_variants + genome_variants add_consequence_to_variants(variants) return variants def get_variants_from_dbsnp(db, rsid): if not rsid.startswith('rs'): return None try: rsid = int(rsid.lstrip('rs')) except Exception, e: return None position = db.dbsnp.find_one({'rsid': rsid}) if position: exome_variants = list(db.exome_variants.find({'xpos': {'$lte': position['xpos'], '$gte': position['xpos']}}, projection={'_id': False})) for variant in exome_variants: variant['dataset'] = 'ExAC' genome_variants = list(db.exome_variants.find({'xpos': {'$lte': position['xpos'], '$gte': position['xpos']}}, projection={'_id': False})) for variant in genome_variants: variant['dataset'] = 'gnomAD' variants = exome_variants + genome_variants add_consequence_to_variants(variants) return variants return [] def get_coverage_for_bases(db, collection, xstart, xstop=None): """ Get the coverage for the list of bases given by xstart->xstop, inclusive Returns list of coverage dicts xstop can be None if just one base, but you'll still get back a list """ if xstop is None: xstop = xstart coverages = { doc['xpos']: doc for doc in db[collection].find( {'xpos': {'$gte': xstart, '$lte': xstop}}, projection={'_id': False} ) } ret = [] for i in range(xstart, xstop+1): if i in coverages: ret.append(coverages[i]) else: ret.append({'xpos': i, 'pos': xpos_to_pos(i)}) for item in ret: item['has_coverage'] = 'mean' in item del item['xpos'] return ret def get_coverage_for_transcript(db, collection, xstart, xstop=None): """ :param db: :param genomic_coord_to_exon: :param xstart: :param xstop: :return: """ coverage_array = get_coverage_for_bases(db, collection, xstart, xstop) # only return coverages that have coverage (if that 
makes any sense?) # return coverage_array covered = [c for c in coverage_array if c['has_coverage']] for c in covered: del c['has_coverage'] return covered def get_constraint_for_transcript(db, transcript): return db.constraint.find_one({'transcript': transcript}, projection={'_id': False}) def get_exons_cnvs(db, transcript_name): return list(db.cnvs.find({'transcript': transcript_name}, projection={'_id': False})) def get_cnvs(db, gene_name): return list(db.cnvgenes.find({'gene': gene_name}, projection={'_id': False})) def get_awesomebar_suggestions(g, query): """ This generates autocomplete suggestions when user query is the string that user types If it is the prefix for a gene, return list of gene names """ regex = re.compile('^' + re.escape(query), re.IGNORECASE) results = [r for r in g.autocomplete_strings if regex.match(r)][:20] return results # 1:1-1000 R1 = re.compile(r'^(\d+|X|Y|M|MT)\s*:\s*(\d+)-(\d+)$') R2 = re.compile(r'^(\d+|X|Y|M|MT)\s*:\s*(\d+)$') R3 = re.compile(r'^(\d+|X|Y|M|MT)$') # R4 = re.compile(r'^(\d+|X|Y|M|MT)\s*[-:]\s*(\d+)-([ATCG]+)-([ATCG]+)$') R4 = re.compile(r'^\s*(\d+|X|Y|M|MT)\s*[-:]\s*(\d+)[-:\s]*([ATCG]+)\s*[-:/]\s*([ATCG]+)\s*$') def get_awesomebar_result(db, query): """ Similar to the above, but this is after a user types enter We need to figure out what they meant - could be gene, variant, region Return tuple of (datatype, identifier) Where datatype is one of 'gene', 'variant', or 'region' And identifier is one of: - ensembl ID for gene - variant ID string for variant (eg. 1-1000-A-T) - region ID string for region (eg. 1-1000-2000) Follow these steps: - if query is an ensembl ID, return it - if a gene symbol, return that gene's ensembl ID - if an RSID, return that variant's string Finally, note that we don't return the whole object here - only it's identifier. 
This could be important for performance later """ query = query.strip() print 'Query: %s' % query # Variant variant = get_variants_by_rsid(db, query.lower()) if variant: if len(variant) == 1: return 'variant', variant[0]['variant_id'] else: return 'dbsnp_variant_set', variant[0]['rsid'] variant = get_variants_from_dbsnp(db, query.lower()) if variant: return 'variant', variant[0]['variant_id'] # variant = get_variant(db, ) # TODO - https://github.com/brettpthomas/exac_browser/issues/14 gene = get_gene_by_name(db, query) if gene: return 'gene', gene['gene_id'] # From here out, all should be uppercase (gene, tx, region, variant_id) query = query.upper() gene = get_gene_by_name(db, query) if gene: return 'gene', gene['gene_id'] # Ensembl formatted queries if query.startswith('ENS'): # Gene gene = get_gene(db, query) if gene: return 'gene', gene['gene_id'] # Transcript transcript = get_transcript(db, query) if transcript: return 'transcript', transcript['transcript_id'] # From here on out, only region queries if query.startswith('CHR'): query = query.lstrip('CHR') # Region m = R1.match(query) if m: if int(m.group(3)) < int(m.group(2)): return 'region', 'invalid' return 'region', '{}-{}-{}'.format(m.group(1), m.group(2), m.group(3)) m = R2.match(query) if m: return 'region', '{}-{}-{}'.format(m.group(1), m.group(2), m.group(2)) m = R3.match(query) if m: return 'region', '{}'.format(m.group(1)) m = R4.match(query) if m: return 'variant', '{}-{}-{}-{}'.format(m.group(1), m.group(2), m.group(3), m.group(4)) return 'not_found', query def get_genes_in_region(db, chrom, start, stop): """ Genes that overlap a region """ xstart = get_xpos(chrom, start) xstop = get_xpos(chrom, stop) genes = db.genes.find({ 'xstart': {'$lte': xstop}, 'xstop': {'$gte': xstart}, }, projection={'_id': False}) return list(genes) def get_variants_in_region(db, chrom, start, stop): """ Variants that overlap a region Unclear if this will include CNVs """ xstart = get_xpos(chrom, start) xstop = get_xpos(chrom, stop) exome_variants = list(db.exome_variants.find({ 'xpos': {'$lte': xstop, '$gte': xstart} }, projection={'_id': False}, limit=SEARCH_LIMIT)) for variant in exome_variants: variant['dataset'] = 'ExAC' genome_variants = list(db.genome_variants.find({ 'xpos': {'$lte': xstop, '$gte': xstart} }, projection={'_id': False}, limit=SEARCH_LIMIT)) for variant in genome_variants: variant['dataset'] = 'gnomAD' variants = exome_variants + genome_variants add_consequence_to_variants(variants) for variant in variants: remove_extraneous_information(variant) return list(variants) def get_metrics(db, variant, source): if source == 'exac': metrics_collection = 'exome_metrics' if source == 'gnomad': metrics_collection = 'genome_metrics' if 'allele_count' not in variant or variant['allele_num'] == 0: return None metrics = {} for metric in METRICS + AS_METRICS: metrics[metric] = db[metrics_collection].find_one({'metric': metric}, projection={'_id': False}) metric = None if variant['allele_count'] == 1: metric = 'singleton' elif variant['allele_count'] == 2: metric = 'doubleton' else: for af in AF_BUCKETS: if float(variant['allele_count'])/variant['allele_num'] < af: metric = af break if metric is not None: metrics['Site Quality'] = db[metrics_collection].find_one({'metric': 'binned_%s' % metric}, projection={'_id': False}) return metrics def remove_extraneous_information(variant): # del variant['genotype_depths'] # del variant['genotype_qualities'] del variant['transcripts'] del variant['genes'] del variant['orig_alt_alleles'] del 
variant['xpos'] del variant['xstart'] del variant['xstop'] del variant['site_quality'] del variant['vep_annotations'] def get_variants_in_gene_or_transcript(db, gene_id=None, transcript_id=None): """Return ExAC and gnomad variants in a gene or transcript Args: db: The mongo database object gene_id, transcript_id: one and only one of these 2 arguments must be specified. This function will query for variants in the exons of the given gene or the transcript depending on which one is specified. """ all_variants = [] exac_variant_uuids = [] gnomad_variant_uuids = [] if gene_id is not None and transcript_id is not None: raise ValueError("Both gene_id and transcript_id args are not None") if gene_id is not None: exons = get_exons_in_gene(db, gene_id) elif transcript_id is not None: exons = get_exons_in_transcript(db, transcript_id) else: raise ValueError("Both gene_id and transcript_id args = None") query_limit_to_exon_ranges = {'$or': [{'$and': [{'xpos': {'$gt': int(exon['xstart'])-75}}, {'xpos': {'$lt': int(exon['xstop'])+75}}]} for exon in exons]} query = {'$and': [{'genes': gene_id} if gene_id is not None else {'transcripts': transcript_id}, query_limit_to_exon_ranges]} results = list(db.exome_variants.find(query)) print("Retrieving %s ExAC v2 variants in %s exons of %s" % (len(results), len(exons), gene_id or transcript_id)) for variant in results: variant['vep_annotations'] = [x for x in variant['vep_annotations'] if (x['Gene'] == gene_id or x['Feature'] == transcript_id)] variant['uuid'] = str(variant['_id']) variant['dataset'] = 'ExAC' del variant['_id'] add_consequence_to_variant(variant) remove_extraneous_information(variant) exac_variant_uuids.append(variant['uuid']) all_variants.append(variant) results = list(db.genome_variants.find(query)) print("Retrieving %s gnomad variants in %s exons of %s" % (len(results), len(exons), gene_id or transcript_id)) for variant in results: variant['vep_annotations'] = [x for x in variant['vep_annotations'] if (x['Gene'] == gene_id or x['Feature'] == transcript_id)] variant['uuid'] = str(variant['_id']) variant['dataset'] = 'gnomAD' del variant['_id'] add_consequence_to_variant(variant) remove_extraneous_information(variant) gnomad_variant_uuids.append(variant['uuid']) all_variants.append(variant) print("Returning %s variants" % len(all_variants)) return { 'all_variants': all_variants, 'uuid_lists': { 'all': exac_variant_uuids + gnomad_variant_uuids, 'exac': exac_variant_uuids, 'gnomad': gnomad_variant_uuids } } def get_transcripts_in_gene(db, gene_id): """ """ return list(db.transcripts.find({'gene_id': gene_id}, projection={'_id': False})) def get_exons_in_transcript(db, transcript_id, sort=True): results = db.exons.find({'transcript_id': transcript_id, 'feature_type': { "$in": ['CDS', 'UTR', 'exon'] }}, projection={'_id': False}) if sort: return sorted(results, key=lambda k: k['start']) else: return results def get_exons_in_gene(db, gene_id, sort=False): results = list(db.exons.find({'gene_id': gene_id, 'feature_type': { "$in": ['CDS', 'UTR', 'exon'] }}, projection={'_id': False})) if sort: return sorted(results, key=lambda k: k['start']) else: return results
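# --- Hedged usage sketch (not part of the module above) ---
# Rough illustration of how the lookup helpers above fit together once a
# Mongo handle is available (the Flask app normally provides `db`). The
# example query string is a placeholder.
def lookup_region_example(db, query='22:46615715-46615880'):
    datatype, identifier = get_awesomebar_result(db, query)
    if datatype != 'region':
        return datatype, identifier
    chrom, start, stop = identifier.split('-')
    return datatype, get_variants_in_region(db, chrom, int(start), int(stop))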
import json import sys from StringIO import StringIO import nose from nose.tools import assert_raises, eq_ from .helper import TestCase from validator.contextgenerator import ContextGenerator from validator.errorbundler import ErrorBundle def test_message_completeness(): """Test we're fully expecting all of the values for a message.""" bundle = ErrorBundle() bundle.error( ('id', ), 'error', 'description', 'file', 123, # line 456 # column ) results = json.loads(bundle.render_json()) eq_(len(results['messages']), 1, 'Unexpected number of messages.') message = results['messages'][0] eq_(message['id'], ['id']) eq_(message['message'], 'error') eq_(message['description'], 'description') eq_(message['file'], 'file') eq_(message['line'], 123) eq_(message['column'], 456) def test_json(): """Test the JSON output capability of the error bundler.""" # Use the StringIO as an output buffer. bundle = ErrorBundle() bundle.detected_type = 4 bundle.set_tier(4) bundle.set_tier(3) bundle.error((), 'error', 'description') bundle.warning((), 'warning', 'description') bundle.notice((), 'notice', 'description') results = json.loads(bundle.render_json()) print results assert len(results['messages']) == 3 assert results['detected_type'] == 'langpack' assert not results['success'] assert results['ending_tier'] == 4 def test_boring(): """Test that boring output strips out color sequences.""" stdout = sys.stdout sys.stdout = StringIO() # Use the StringIO as an output buffer. bundle = ErrorBundle() bundle.error((), '<<BLUE>><<GREEN>><<YELLOW>>') bundle.print_summary(no_color=True) outbuffer = sys.stdout sys.stdout = stdout outbuffer.seek(0) assert outbuffer.getvalue().count('<<GREEN>>') == 0 def test_type(): """ Test that detected type is being stored properly in the error bundle. """ bundle = ErrorBundle() bundle.detected_type = 5 assert bundle.detected_type == 5 def test_states(): """Test that detected type is preserved, even in subpackages.""" # Use the StringIO as an output buffer. bundle = ErrorBundle() # Populate the bundle with some test data. bundle.detected_type = 4 bundle.error((), 'error') bundle.warning((), 'warning') bundle.notice((), 'notice') bundle.save_resource('test', True) # Push a state bundle.push_state('test.xpi') bundle.detected_type = 2 bundle.error((), 'nested error') bundle.warning((), 'nested warning') bundle.notice((), 'nested notice') # Push another state bundle.push_state('test2.xpi') bundle.detected_type = 3 bundle.error((), 'super nested error') bundle.warning((), 'super nested warning') bundle.notice((), 'super nested notice') # Test that nested compatibility messages retain various # properties. bundle.notice('comp', 'Compat Test notice', compatibility_type='error', editors_only=True, signing_severity='high') bundle.pop_state() bundle.pop_state() # Load the JSON output as an object. 
output = json.loads(bundle.render_json()) # Run some basic tests assert output['detected_type'] == 'langpack' assert len(output['messages']) == 10 messages = ['error', 'warning', 'notice', 'nested error', 'nested warning', 'nested notice', 'super nested error', 'super nested warning', 'super nested notice', 'Compat Test notice'] for message in output['messages']: assert message['message'] in messages messages.remove(message['message']) assert message['message'].endswith(message['type']) if message['id'] == 'comp': assert message['compatibility_type'] == 'error' assert message['editors_only'] == True assert message['signing_severity'] == 'high' assert not messages assert bundle.get_resource('test') def test_file_structure(): """ Test the means by which file names and line numbers are stored in errors, warnings, and messages. """ # Use the StringIO as an output buffer. bundle = ErrorBundle(determined=True) # Populate the bundle with some test data. bundle.error((), 'error', description='', filename='file1', column=123) bundle.error((), 'error', description='', filename='file2') bundle.error((), 'error') # Push a state bundle.push_state('foo') bundle.warning((), 'warning', description='', filename='file4', column=123) bundle.warning((), 'warning', description='', filename='file5') bundle.warning((), 'warning') bundle.pop_state() # Load the JSON output as an object. output = json.loads(bundle.render_json()) # Do the same for friendly output output2 = bundle.print_summary(verbose=False) # Do the same for verbose friendly output output3 = bundle.print_summary(verbose=True) # Run some basic tests assert len(output['messages']) == 6 assert len(output2) < len(output3) print output print '*' * 50 print output2 print '*' * 50 print output3 print '*' * 50 messages = ['file1', 'file2', '', ['foo', 'file4'], ['foo', 'file5'], ['foo', '']] for message in output['messages']: print message assert message['file'] in messages messages.remove(message['file']) if isinstance(message['file'], list): pattern = message['file'][:] pattern.pop() pattern.append('') file_merge = ' > '.join(pattern) print file_merge assert output3.count(file_merge) else: assert output3.count(message['file']) assert not messages def test_notice(): """Test notice-related functions of the error bundler.""" # Use the StringIO as an output buffer. bundle = ErrorBundle() bundle.notice((), '') # Load the JSON output as an object. output = json.loads(bundle.render_json()) # Run some basic tests assert len(output['messages']) == 1 print output has_ = False for message in output['messages']: print message if message['type'] == 'notice': has_ = True assert has_ assert not bundle.failed() assert not bundle.failed(True) def test_notice_friendly(): """ Test notice-related human-friendly text output functions of the error bundler. """ # Use the StringIO as an output buffer. bundle = ErrorBundle() bundle.notice((), 'foobar') # Load the JSON output as an object. 
output = bundle.print_summary(verbose=True, no_color=True) print output assert output.count('foobar') def test_initializer(): """Test that the __init__ paramaters are doing their jobs.""" e = ErrorBundle() assert e.determined assert e.get_resource('listed') e = ErrorBundle(determined=False) assert not e.determined assert e.get_resource('listed') e = ErrorBundle(listed=False) assert e.determined assert not e.get_resource('listed') def test_json_constructs(): """This tests some of the internal JSON stuff so we don't break zamboni.""" e = ErrorBundle() e.detected_type = 1 e.error(('a', 'b', 'c'), 'Test') e.error(('a', 'b', 'foo'), 'Test') e.error(('a', 'foo', 'c'), 'Test') e.error(('a', 'foo', 'c'), 'Test') e.error(('b', 'foo', 'bar'), 'Test') e.warning((), 'Context test', context=('x', 'y', 'z')) e.warning((), 'Context test', context=ContextGenerator('x\ny\nz\n'), line=2, column=0) e.notice((), 'none') e.notice((), 'line', line=1) e.notice((), 'column', column=0) e.notice((), 'line column', line=1, column=1) results = e.render_json() print results j = json.loads(results) assert 'detected_type' in j assert j['detected_type'] == 'extension' assert 'message_tree' in j tree = j['message_tree'] assert '__errors' not in tree assert not tree['a']['__messages'] assert tree['a']['__errors'] == 4 assert not tree['a']['b']['__messages'] assert tree['a']['b']['__errors'] == 2 assert not tree['a']['b']['__messages'] assert tree['a']['b']['c']['__errors'] == 1 assert tree['a']['b']['c']['__messages'] assert 'messages' in j for m in (m for m in j['messages'] if m['type'] == 'warning'): assert m['context'] == ['x', 'y', 'z'] for m in (m for m in j['messages'] if m['type'] == 'notice'): if 'line' in m['message']: assert m['line'] is not None assert isinstance(m['line'], int) assert m['line'] > 0 else: assert m['line'] is None if 'column' in m['message']: assert m['column'] is not None assert isinstance(m['column'], int) assert m['column'] > -1 else: assert m['column'] is None def test_json_compatibility(): """Test compatibility elements in the JSON output.""" err = ErrorBundle() err.notice( err_id='m1', notice='Compat error', description='Compatibility error 1', compatibility_type='error') err.notice( err_id='m2', notice='Compat error', description='Compatibility error 2', compatibility_type='error') err.warning( err_id='m3', warning='Compat notice', description='Compatibility notice 1', compatibility_type='notice') err.warning( err_id='m4', warning='Compat warning', description='Compatibility warning 1', compatibility_type='warning') err.warning( err_id='m5', warning='Compat warning', description='Compatibility warning 1', compatibility_type='warning') err.error( err_id='foo', error='Something else', description='An error that has nothign to do with compatibility') results = err.render_json() jdata = json.loads(results) assert 'compatibility_summary' in jdata nose.tools.eq_(jdata['compatibility_summary'], {'errors': 2, 'warnings': 2, 'notices': 1}) reference = {'m1': 'error', 'm2': 'error', 'm3': 'notice', 'm4': 'warning', 'm5': 'warning'} assert 'messages' in jdata and len(jdata['messages']) for message in jdata['messages']: if message['id'] in reference: print (message['id'], reference[message['id']], message['compatibility_type']) nose.tools.eq_(reference[message['id']], message['compatibility_type']) def test_pushable_resources(): """ Test that normal resources are preserved but pushable ones are pushed. 
""" e = ErrorBundle() e.save_resource('nopush', True) e.save_resource('push', True, pushable=True) assert e.get_resource('nopush') assert e.get_resource('push') e.push_state() assert e.get_resource('nopush') assert not e.get_resource('push') e.save_resource('pushed', True, pushable=True) assert e.get_resource('pushed') e.pop_state() assert e.get_resource('nopush') assert e.get_resource('push') assert not e.get_resource('pushed') def test_forappversions(): """Test that app version information is passed to the JSON.""" app_test_data = {'guid': ['version1', 'version2']} e = ErrorBundle() e.supported_versions = {'guid': ['version1']} e.error(err_id=('foo', ), error='Test', for_appversions=app_test_data) # This one should not apply. e.error(err_id=('foo', ), error='Test', for_appversions={'fooguid': ['bar', 'baz']}) e.warning(err_id=('foo', ), warning='Test', for_appversions=app_test_data) # Give one its data from the decorator e.version_requirements = app_test_data e.notice(err_id=('foo', ), notice='Test') j = e.render_json() jdata = json.loads(j) assert len(jdata['messages']) == 3 for m in jdata['messages']: assert m['for_appversions'] == app_test_data class TestReport(TestCase): def test_merge_err_id(self): """Test that `err_id` values are merged as expected.""" self.err.report({'err_id': ('foo', 'bar', 'baz'), 'warning': 'Hello.'}, {'err_id': 'quux'}) self.assert_failed(with_warnings=[{'id': ('foo', 'bar', 'quux')}]) self.setup_err() self.err.report({'err_id': ('a', 'b', 'c'), 'warning': 'Hello.'}, {'err_id': ('foo', 'bar', 'quux')}) self.assert_failed(with_warnings=[{'id': ('foo', 'bar', 'quux')}]) with assert_raises(AssertionError): self.err.report({'err_id': 'foo'}, {'err_id': 'quux'}) with assert_raises(AssertionError): self.err.report({}, {'err_id': 'quux'}) def test_message_type(self): """Test that we emit a message of the correct type for each reporter function.""" for type_ in 'notice', 'warning', 'error': err = ErrorBundle() err.report({'err_id': ('a', 'b', 'c'), type_: 'Hello.'}) # Make sure we have this message in the list for the expected # message type. messages = getattr(err, '{0}s'.format(type_)) eq_(messages[0]['message'], 'Hello.') # Make sure we only have this message in one place. eq_(len(err.notices + err.warnings + err.errors), 1)
from jsonrpc import ServiceProxy import sys import string import getpass # ===== BEGIN USER SETTINGS ===== # if you do not set these you will be prompted for a password for every command rpcuser = "" rpcpass = "" # ====== END USER SETTINGS ====== if rpcpass == "": access = ServiceProxy("http://127.0.0.1:8332") else: access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332") cmd = sys.argv[1].lower() if cmd == "backupwallet": try: path = raw_input("Enter destination path/filename: ") print access.backupwallet(path) except: print "\n---An error occurred---\n" elif cmd == "getaccount": try: addr = raw_input("Enter a Mrcoin address: ") print access.getaccount(addr) except: print "\n---An error occurred---\n" elif cmd == "getaccountaddress": try: acct = raw_input("Enter an account name: ") print access.getaccountaddress(acct) except: print "\n---An error occurred---\n" elif cmd == "getaddressesbyaccount": try: acct = raw_input("Enter an account name: ") print access.getaddressesbyaccount(acct) except: print "\n---An error occurred---\n" elif cmd == "getbalance": try: acct = raw_input("Enter an account (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getbalance(acct, mc) except: print access.getbalance() except: print "\n---An error occurred---\n" elif cmd == "getblockbycount": try: height = raw_input("Height: ") print access.getblockbycount(height) except: print "\n---An error occurred---\n" elif cmd == "getblockcount": try: print access.getblockcount() except: print "\n---An error occurred---\n" elif cmd == "getblocknumber": try: print access.getblocknumber() except: print "\n---An error occurred---\n" elif cmd == "getconnectioncount": try: print access.getconnectioncount() except: print "\n---An error occurred---\n" elif cmd == "getdifficulty": try: print access.getdifficulty() except: print "\n---An error occurred---\n" elif cmd == "getgenerate": try: print access.getgenerate() except: print "\n---An error occurred---\n" elif cmd == "gethashespersec": try: print access.gethashespersec() except: print "\n---An error occurred---\n" elif cmd == "getinfo": try: print access.getinfo() except: print "\n---An error occurred---\n" elif cmd == "getnewaddress": try: acct = raw_input("Enter an account name: ") try: print access.getnewaddress(acct) except: print access.getnewaddress() except: print "\n---An error occurred---\n" elif cmd == "getreceivedbyaccount": try: acct = raw_input("Enter an account (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getreceivedbyaccount(acct, mc) except: print access.getreceivedbyaccount() except: print "\n---An error occurred---\n" elif cmd == "getreceivedbyaddress": try: addr = raw_input("Enter a Mrcoin address (optional): ") mc = raw_input("Minimum confirmations (optional): ") try: print access.getreceivedbyaddress(addr, mc) except: print access.getreceivedbyaddress() except: print "\n---An error occurred---\n" elif cmd == "gettransaction": try: txid = raw_input("Enter a transaction ID: ") print access.gettransaction(txid) except: print "\n---An error occurred---\n" elif cmd == "getwork": try: data = raw_input("Data (optional): ") try: print access.gettransaction(data) except: print access.gettransaction() except: print "\n---An error occurred---\n" elif cmd == "help": try: cmd = raw_input("Command (optional): ") try: print access.help(cmd) except: print access.help() except: print "\n---An error occurred---\n" elif cmd == "listaccounts": try: mc = raw_input("Minimum confirmations 
(optional): ") try: print access.listaccounts(mc) except: print access.listaccounts() except: print "\n---An error occurred---\n" elif cmd == "listreceivedbyaccount": try: mc = raw_input("Minimum confirmations (optional): ") incemp = raw_input("Include empty? (true/false, optional): ") try: print access.listreceivedbyaccount(mc, incemp) except: print access.listreceivedbyaccount() except: print "\n---An error occurred---\n" elif cmd == "listreceivedbyaddress": try: mc = raw_input("Minimum confirmations (optional): ") incemp = raw_input("Include empty? (true/false, optional): ") try: print access.listreceivedbyaddress(mc, incemp) except: print access.listreceivedbyaddress() except: print "\n---An error occurred---\n" elif cmd == "listtransactions": try: acct = raw_input("Account (optional): ") count = raw_input("Number of transactions (optional): ") frm = raw_input("Skip (optional):") try: print access.listtransactions(acct, count, frm) except: print access.listtransactions() except: print "\n---An error occurred---\n" elif cmd == "move": try: frm = raw_input("From: ") to = raw_input("To: ") amt = raw_input("Amount:") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") try: print access.move(frm, to, amt, mc, comment) except: print access.move(frm, to, amt) except: print "\n---An error occurred---\n" elif cmd == "sendfrom": try: frm = raw_input("From: ") to = raw_input("To: ") amt = raw_input("Amount:") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") commentto = raw_input("Comment-to (optional): ") try: print access.sendfrom(frm, to, amt, mc, comment, commentto) except: print access.sendfrom(frm, to, amt) except: print "\n---An error occurred---\n" elif cmd == "sendmany": try: frm = raw_input("From: ") to = raw_input("To (in format address1:amount1,address2:amount2,...): ") mc = raw_input("Minimum confirmations (optional): ") comment = raw_input("Comment (optional): ") try: print access.sendmany(frm,to,mc,comment) except: print access.sendmany(frm,to) except: print "\n---An error occurred---\n" elif cmd == "sendtoaddress": try: to = raw_input("To (in format address1:amount1,address2:amount2,...): ") amt = raw_input("Amount:") comment = raw_input("Comment (optional): ") commentto = raw_input("Comment-to (optional): ") try: print access.sendtoaddress(to,amt,comment,commentto) except: print access.sendtoaddress(to,amt) except: print "\n---An error occurred---\n" elif cmd == "setaccount": try: addr = raw_input("Address: ") acct = raw_input("Account:") print access.setaccount(addr,acct) except: print "\n---An error occurred---\n" elif cmd == "setgenerate": try: gen= raw_input("Generate? 
(true/false): ") cpus = raw_input("Max processors/cores (-1 for unlimited, optional):") try: print access.setgenerate(gen, cpus) except: print access.setgenerate(gen) except: print "\n---An error occurred---\n" elif cmd == "settxfee": try: amt = raw_input("Amount:") print access.settxfee(amt) except: print "\n---An error occurred---\n" elif cmd == "stop": try: print access.stop() except: print "\n---An error occurred---\n" elif cmd == "validateaddress": try: addr = raw_input("Address: ") print access.validateaddress(addr) except: print "\n---An error occurred---\n" elif cmd == "walletpassphrase": try: pwd = getpass.getpass(prompt="Enter wallet passphrase: ") access.walletpassphrase(pwd, 60) print "\n---Wallet unlocked---\n" except: print "\n---An error occurred---\n" elif cmd == "walletpassphrasechange": try: pwd = getpass.getpass(prompt="Enter old wallet passphrase: ") pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ") access.walletpassphrasechange(pwd, pwd2) print print "\n---Passphrase changed---\n" except: print print "\n---An error occurred---\n" print else: print "Command not found or not supported"
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for building profiler options.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from tensorflow.python.profiler import tfprof_logger class ProfileOptionBuilder(object): # pylint: disable=line-too-long """Option Builder for Profiling API. For tutorial on the options, see https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md ```python # Users can use pre-built options: opts = ( tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()) # Or, build your own options: opts = (tf.profiler.ProfileOptionBuilder() .with_max_depth(10) .with_min_micros(1000) .select(['accelerator_micros']) .with_stdout_output() .build() # Or customize the pre-built options: opts = (tf.profiler.ProfileOptionBuilder( tf.profiler.ProfileOptionBuilder.time_and_memory()) .with_displaying_options(show_name_regexes=['.*rnn.*']) .build()) # Finally, profiling with the options: _ = tf.profiler.profile(tf.get_default_graph(), run_meta=run_meta, cmd='scope', options=opts) ``` """ # pylint: enable=line-too-long def __init__(self, options=None): """Constructor. Args: options: Optional initial option dict to start with. """ if options is not None: self._options = copy.deepcopy(options) else: self._options = {'max_depth': 100, 'min_bytes': 0, 'min_micros': 0, 'min_params': 0, 'min_float_ops': 0, 'min_occurrence': 0, 'order_by': 'name', 'account_type_regexes': ['.*'], 'start_name_regexes': ['.*'], 'trim_name_regexes': [], 'show_name_regexes': ['.*'], 'hide_name_regexes': [], 'account_displayed_op_only': False, 'select': ['micros'], 'step': -1, 'output': 'stdout'} @staticmethod def trainable_variables_parameter(): """Options used to profile trainable variable parameters. Normally used together with 'scope' view. Returns: A dict of profiling options. """ return {'max_depth': 10000, 'min_bytes': 0, 'min_micros': 0, 'min_params': 0, 'min_float_ops': 0, 'min_occurrence': 0, 'order_by': 'name', 'account_type_regexes': [tfprof_logger.TRAINABLE_VARIABLES], 'start_name_regexes': ['.*'], 'trim_name_regexes': [], 'show_name_regexes': ['.*'], 'hide_name_regexes': [], 'account_displayed_op_only': True, 'select': ['params'], 'step': -1, 'output': 'stdout'} @staticmethod def float_operation(): # pylint: disable=line-too-long """Options used to profile float operations. Please see https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/profile_model_architecture.md on the caveats of calculating float operations. Returns: A dict of profiling options. 
""" # pylint: enable=line-too-long return {'max_depth': 10000, 'min_bytes': 0, 'min_micros': 0, 'min_params': 0, 'min_float_ops': 1, 'min_occurrence': 0, 'order_by': 'float_ops', 'account_type_regexes': ['.*'], 'start_name_regexes': ['.*'], 'trim_name_regexes': [], 'show_name_regexes': ['.*'], 'hide_name_regexes': [], 'account_displayed_op_only': True, 'select': ['float_ops'], 'step': -1, 'output': 'stdout'} @staticmethod def time_and_memory(min_micros=1, min_bytes=1, min_accelerator_micros=0, min_cpu_micros=0, min_peak_bytes=0, min_residual_bytes=0, min_output_bytes=0): """Show operation time and memory consumptions. Args: min_micros: Only show profiler nodes with execution time no less than this. It sums accelerator and cpu times. min_bytes: Only show profiler nodes requested to allocate no less bytes than this. min_accelerator_micros: Only show profiler nodes spend no less than this time on accelerator (e.g. GPU). min_cpu_micros: Only show profiler nodes spend no less than this time on cpu. min_peak_bytes: Only show profiler nodes using no less than this bytes at peak (high watermark). For profiler nodes consist of multiple graph nodes, it sums the graph nodes' peak_bytes. min_residual_bytes: Only show profiler nodes have no less than this bytes not being de-allocated after Compute() ends. For profiler nodes consist of multiple graph nodes, it sums the graph nodes' residual_bytes. min_output_bytes: Only show profiler nodes have no less than this bytes output. The output are not necessarily allocated by this profiler nodes. Returns: A dict of profiling options. """ return {'max_depth': 10000, 'min_bytes': min_bytes, 'min_peak_bytes': min_peak_bytes, 'min_residual_bytes': min_residual_bytes, 'min_output_bytes': min_output_bytes, 'min_micros': min_micros, 'min_accelerator_micros': min_accelerator_micros, 'min_cpu_micros': min_cpu_micros, 'min_params': 0, 'min_float_ops': 0, 'min_occurrence': 0, 'order_by': 'micros', 'account_type_regexes': ['.*'], 'start_name_regexes': ['.*'], 'trim_name_regexes': [], 'show_name_regexes': ['.*'], 'hide_name_regexes': [], 'account_displayed_op_only': True, 'select': ['micros', 'bytes'], 'step': -1, 'output': 'stdout'} def build(self): """Build a profiling option. Returns: A dict of profiling options. """ return copy.deepcopy(self._options) def with_max_depth(self, max_depth): """Set the maximum depth of display. The depth depends on profiling view. For 'scope' view, it's the depth of name scope hierarchy (tree), for 'op' view, it's the number of operation types (list), etc. Args: max_depth: Maximum depth of the data structure to display. Returns: self """ self._options['max_depth'] = max_depth return self def with_min_memory(self, min_bytes=0, min_peak_bytes=0, min_residual_bytes=0, min_output_bytes=0): """Only show profiler nodes consuming no less than 'min_bytes'. Args: min_bytes: Only show profiler nodes requested to allocate no less bytes than this. min_peak_bytes: Only show profiler nodes using no less than this bytes at peak (high watermark). For profiler nodes consist of multiple graph nodes, it sums the graph nodes' peak_bytes. min_residual_bytes: Only show profiler nodes have no less than this bytes not being de-allocated after Compute() ends. For profiler nodes consist of multiple graph nodes, it sums the graph nodes' residual_bytes. min_output_bytes: Only show profiler nodes have no less than this bytes output. The output are not necessarily allocated by this profiler nodes. 
Returns: self """ self._options['min_bytes'] = min_bytes self._options['min_peak_bytes'] = min_peak_bytes self._options['min_residual_bytes'] = min_residual_bytes self._options['min_output_bytes'] = min_output_bytes return self def with_min_execution_time(self, min_micros=0, min_accelerator_micros=0, min_cpu_micros=0): """Only show profiler nodes consuming no less than 'min_micros'. Args: min_micros: Only show profiler nodes with execution time no less than this. It sums accelerator and cpu times. min_accelerator_micros: Only show profiler nodes spend no less than this time on accelerator (e.g. GPU). min_cpu_micros: Only show profiler nodes spend no less than this time on cpu. Returns: self """ self._options['min_micros'] = min_micros self._options['min_accelerator_micros'] = min_accelerator_micros self._options['min_cpu_micros'] = min_cpu_micros return self def with_min_parameters(self, min_params): """Only show profiler nodes holding no less than 'min_params' parameters. 'Parameters' normally refers the weights of in TensorFlow variables. It reflects the 'capacity' of models. Args: min_params: Only show profiler nodes holding number parameters no less than this. Returns: self """ self._options['min_params'] = min_params return self def with_min_occurrence(self, min_occurrence): # pylint: disable=line-too-long """Only show profiler nodes including no less than 'min_occurrence' graph nodes. A "node" means a profiler output node, which can be a python line (code view), an operation type (op view), or a graph node (graph/scope view). A python line includes all graph nodes created by that line, while an operation type includes all graph nodes of that type. Args: min_occurrence: Only show nodes including no less than this. Returns: self """ # pylint: enable=line-too-long self._options['min_occurrence'] = min_occurrence return self def with_min_float_operations(self, min_float_ops): # pylint: disable=line-too-long """Only show profiler nodes consuming no less than 'min_float_ops'. Please see https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profilerg3doc/profile_model_architecture.md on the caveats of calculating float operations. Args: min_float_ops: Only show profiler nodes with float operations no less than this. Returns: self """ # pylint: enable=line-too-long self._options['min_float_ops'] = min_float_ops return self def with_accounted_types(self, account_type_regexes): """Selectively counting statistics based on node types. Here, 'types' means the profiler nodes' properties. Profiler by default consider device name (e.g. /job:xx/.../device:GPU:0) and operation type (e.g. MatMul) as profiler nodes' properties. User can also associate customized 'types' to profiler nodes through OpLogProto proto. For example, user can select profiler nodes placed on gpu:0 with: `account_type_regexes=['.*gpu:0.*']` If none of a node's properties match the specified regexes, the node is not displayed nor accounted. Args: account_type_regexes: A list of regexes specifying the types. Returns: self. """ self._options['account_type_regexes'] = copy.copy(account_type_regexes) return self def with_node_names(self, start_name_regexes=None, show_name_regexes=None, hide_name_regexes=None, trim_name_regexes=None): """Regular expressions used to select profiler nodes to display. 
After 'with_accounted_types' is evaluated, 'with_node_names' are evaluated as follows: For a profile data structure, profiler first finds the profiler nodes matching 'start_name_regexes', and starts displaying profiler nodes from there. Then, if a node matches 'show_name_regexes' and doesn't match 'hide_name_regexes', it's displayed. If a node matches 'trim_name_regexes', profiler stops further searching that branch. Args: start_name_regexes: list of node name regexes to start displaying. show_name_regexes: list of node names regexes to display. hide_name_regexes: list of node_names regexes that should be hidden. trim_name_regexes: list of node name regexes from where to stop. Returns: self """ if start_name_regexes is not None: self._options['start_name_regexes'] = copy.copy(start_name_regexes) if show_name_regexes is not None: self._options['show_name_regexes'] = copy.copy(show_name_regexes) if hide_name_regexes is not None: self._options['hide_name_regexes'] = copy.copy(hide_name_regexes) if trim_name_regexes is not None: self._options['trim_name_regexes'] = copy.copy(trim_name_regexes) return self def account_displayed_op_only(self, is_true): """Whether only account the statistics of displayed profiler nodes. Args: is_true: If true, only account statistics of nodes eventually displayed by the outputs. Otherwise, a node's statistics are accounted by its parents as long as it's types match 'account_type_regexes', even if it is hidden from the output, say, by hide_name_regexes. Returns: self """ self._options['account_displayed_op_only'] = is_true return self def with_empty_output(self): """Do not generate side-effect outputs.""" self._options['output'] = 'none' return self def with_stdout_output(self): """Print the result to stdout.""" self._options['output'] = 'stdout' return self def with_file_output(self, outfile): """Print the result to a file.""" self._options['output'] = 'file:outfile=%s' % outfile return self def with_timeline_output(self, timeline_file): """Generate a timeline json file.""" self._options['output'] = 'timeline:outfile=%s' % timeline_file return self def with_pprof_output(self, pprof_file): """Generate a pprof profile gzip file. To use the pprof file: pprof -png --nodecount=100 --sample_index=1 <pprof_file> Args: pprof_file: filename for output, usually suffixed with .pb.gz. Returns: self. """ self._options['output'] = 'pprof:outfile=%s' % pprof_file return self def order_by(self, attribute): # pylint: disable=line-too-long """Order the displayed profiler nodes based on a attribute. Supported attribute includes micros, bytes, occurrence, params, etc. https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md Args: attribute: An attribute the profiler node has. Returns: self """ # pylint: enable=line-too-long self._options['order_by'] = attribute return self def select(self, attributes): # pylint: disable=line-too-long """Select the attributes to display. See https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md for supported attributes. Args: attributes: A list of attribute the profiler node has. Returns: self """ # pylint: enable=line-too-long self._options['select'] = copy.copy(attributes) return self def with_step(self, step): """Which profile step to use for profiling. The 'step' here refers to the step defined by `Profiler.add_step()` API. Args: step: When multiple steps of profiles are available, select which step's profile to use. If -1, use average of all available steps. 
Returns: self """ self._options['step'] = step return self
""" pygments.lexers.webidl ~~~~~~~~~~~~~~~~~~~~~~ Lexers for Web IDL, including some extensions. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, default, include, words from pygments.token import Comment, Keyword, Name, Number, Punctuation, \ String, Text __all__ = ['WebIDLLexer'] _builtin_types = ( # primitive types 'byte', 'octet', 'boolean', r'(?:unsigned\s+)?(?:short|long(?:\s+long)?)', r'(?:unrestricted\s+)?(?:float|double)', # string types 'DOMString', 'ByteString', 'USVString', # exception types 'Error', 'DOMException', # typed array types 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Uint8ClampedArray', 'Float32Array', 'Float64Array', # buffer source types 'ArrayBuffer', 'DataView', 'Int8Array', 'Int16Array', 'Int32Array', # other 'any', 'void', 'object', 'RegExp', ) _identifier = r'_?[A-Za-z][a-zA-Z0-9_-]*' _keyword_suffix = r'(?![\w-])' _string = r'"[^"]*"' class WebIDLLexer(RegexLexer): """ For Web IDL. .. versionadded:: 2.6 """ name = 'Web IDL' aliases = ['webidl'] filenames = ['*.webidl'] tokens = { 'common': [ (r'\s+', Text), (r'(?s)/\*.*?\*/', Comment.Multiline), (r'//.*', Comment.Single), (r'^#.*', Comment.Preproc), ], 'root': [ include('common'), (r'\[', Punctuation, 'extended_attributes'), (r'partial' + _keyword_suffix, Keyword), (r'typedef' + _keyword_suffix, Keyword, ('typedef', 'type')), (r'interface' + _keyword_suffix, Keyword, 'interface_rest'), (r'enum' + _keyword_suffix, Keyword, 'enum_rest'), (r'callback' + _keyword_suffix, Keyword, 'callback_rest'), (r'dictionary' + _keyword_suffix, Keyword, 'dictionary_rest'), (r'namespace' + _keyword_suffix, Keyword, 'namespace_rest'), (_identifier, Name.Class, 'implements_rest'), ], 'extended_attributes': [ include('common'), (r',', Punctuation), (_identifier, Name.Decorator), (r'=', Punctuation, 'extended_attribute_rest'), (r'\(', Punctuation, 'argument_list'), (r'\]', Punctuation, '#pop'), ], 'extended_attribute_rest': [ include('common'), (_identifier, Name, 'extended_attribute_named_rest'), (_string, String), (r'\(', Punctuation, 'identifier_list'), default('#pop'), ], 'extended_attribute_named_rest': [ include('common'), (r'\(', Punctuation, 'argument_list'), default('#pop'), ], 'argument_list': [ include('common'), (r'\)', Punctuation, '#pop'), default('argument'), ], 'argument': [ include('common'), (r'optional' + _keyword_suffix, Keyword), (r'\[', Punctuation, 'extended_attributes'), (r',', Punctuation, '#pop'), (r'\)', Punctuation, '#pop:2'), default(('argument_rest', 'type')) ], 'argument_rest': [ include('common'), (_identifier, Name.Variable), (r'\.\.\.', Punctuation), (r'=', Punctuation, 'default_value'), default('#pop'), ], 'identifier_list': [ include('common'), (_identifier, Name.Class), (r',', Punctuation), (r'\)', Punctuation, '#pop'), ], 'type': [ include('common'), (r'(?:' + r'|'.join(_builtin_types) + r')' + _keyword_suffix, Keyword.Type, 'type_null'), (words(('sequence', 'Promise', 'FrozenArray'), suffix=_keyword_suffix), Keyword.Type, 'type_identifier'), (_identifier, Name.Class, 'type_identifier'), (r'\(', Punctuation, 'union_type'), ], 'union_type': [ include('common'), (r'or' + _keyword_suffix, Keyword), (r'\)', Punctuation, ('#pop', 'type_null')), default('type'), ], 'type_identifier': [ (r'<', Punctuation, 'type_list'), default(('#pop', 'type_null')) ], 'type_null': [ (r'\?', Punctuation), default('#pop:2'), ], 'default_value': [ include('common'), include('const_value'), (_string, String, '#pop'), 
(r'\[\s*\]', Punctuation, '#pop'), ], 'const_value': [ include('common'), (words(('true', 'false', '-Infinity', 'Infinity', 'NaN', 'null'), suffix=_keyword_suffix), Keyword.Constant, '#pop'), (r'-?(?:(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:[Ee][+-]?[0-9]+)?' + r'|[0-9]+[Ee][+-]?[0-9]+)', Number.Float, '#pop'), (r'-?[1-9][0-9]*', Number.Integer, '#pop'), (r'-?0[Xx][0-9A-Fa-f]+', Number.Hex, '#pop'), (r'-?0[0-7]*', Number.Oct, '#pop'), ], 'typedef': [ include('common'), (_identifier, Name.Class), (r';', Punctuation, '#pop'), ], 'namespace_rest': [ include('common'), (_identifier, Name.Namespace), (r'\{', Punctuation, 'namespace_body'), (r';', Punctuation, '#pop'), ], 'namespace_body': [ include('common'), (r'\[', Punctuation, 'extended_attributes'), (r'readonly' + _keyword_suffix, Keyword), (r'attribute' + _keyword_suffix, Keyword, ('attribute_rest', 'type')), (r'const' + _keyword_suffix, Keyword, ('const_rest', 'type')), (r'\}', Punctuation, '#pop'), default(('operation_rest', 'type')), ], 'interface_rest': [ include('common'), (_identifier, Name.Class), (r':', Punctuation), (r'\{', Punctuation, 'interface_body'), (r';', Punctuation, '#pop'), ], 'interface_body': [ (words(('iterable', 'maplike', 'setlike'), suffix=_keyword_suffix), Keyword, 'iterable_maplike_setlike_rest'), (words(('setter', 'getter', 'creator', 'deleter', 'legacycaller', 'inherit', 'static', 'stringifier', 'jsonifier'), suffix=_keyword_suffix), Keyword), (r'serializer' + _keyword_suffix, Keyword, 'serializer_rest'), (r';', Punctuation), include('namespace_body'), ], 'attribute_rest': [ include('common'), (_identifier, Name.Variable), (r';', Punctuation, '#pop'), ], 'const_rest': [ include('common'), (_identifier, Name.Constant), (r'=', Punctuation, 'const_value'), (r';', Punctuation, '#pop'), ], 'operation_rest': [ include('common'), (r';', Punctuation, '#pop'), default('operation'), ], 'operation': [ include('common'), (_identifier, Name.Function), (r'\(', Punctuation, 'argument_list'), (r';', Punctuation, '#pop:2'), ], 'iterable_maplike_setlike_rest': [ include('common'), (r'<', Punctuation, 'type_list'), (r';', Punctuation, '#pop'), ], 'type_list': [ include('common'), (r',', Punctuation), (r'>', Punctuation, '#pop'), default('type'), ], 'serializer_rest': [ include('common'), (r'=', Punctuation, 'serialization_pattern'), (r';', Punctuation, '#pop'), default('operation'), ], 'serialization_pattern': [ include('common'), (_identifier, Name.Variable, '#pop'), (r'\{', Punctuation, 'serialization_pattern_map'), (r'\[', Punctuation, 'serialization_pattern_list'), ], 'serialization_pattern_map': [ include('common'), (words(('getter', 'inherit', 'attribute'), suffix=_keyword_suffix), Keyword), (r',', Punctuation), (_identifier, Name.Variable), (r'\}', Punctuation, '#pop:2'), ], 'serialization_pattern_list': [ include('common'), (words(('getter', 'attribute'), suffix=_keyword_suffix), Keyword), (r',', Punctuation), (_identifier, Name.Variable), (r']', Punctuation, '#pop:2'), ], 'enum_rest': [ include('common'), (_identifier, Name.Class), (r'\{', Punctuation, 'enum_body'), (r';', Punctuation, '#pop'), ], 'enum_body': [ include('common'), (_string, String), (r',', Punctuation), (r'\}', Punctuation, '#pop'), ], 'callback_rest': [ include('common'), (r'interface' + _keyword_suffix, Keyword, ('#pop', 'interface_rest')), (_identifier, Name.Class), (r'=', Punctuation, ('operation', 'type')), (r';', Punctuation, '#pop'), ], 'dictionary_rest': [ include('common'), (_identifier, Name.Class), (r':', Punctuation), (r'\{', Punctuation, 
'dictionary_body'), (r';', Punctuation, '#pop'), ], 'dictionary_body': [ include('common'), (r'\[', Punctuation, 'extended_attributes'), (r'required' + _keyword_suffix, Keyword), (r'\}', Punctuation, '#pop'), default(('dictionary_item', 'type')), ], 'dictionary_item': [ include('common'), (_identifier, Name.Variable), (r'=', Punctuation, 'default_value'), (r';', Punctuation, '#pop'), ], 'implements_rest': [ include('common'), (r'implements' + _keyword_suffix, Keyword), (_identifier, Name.Class), (r';', Punctuation, '#pop'), ], }
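# ---------------------------------------------------------------------------
# Illustrative check (not part of the lexer definition above): how the state
# machine tokenizes a small, made-up Web IDL fragment.  It relies only on the
# standard RegexLexer.get_tokens() API.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    sample = '''
        [Exposed=Window]
        interface Dog {
            readonly attribute DOMString name;
            const unsigned long MAX_AGE = 30;
            Promise<boolean> bark(optional long times = 1);
        };
    '''
    # Print each non-whitespace token with its token type.
    for token_type, value in WebIDLLexer().get_tokens(sample):
        if value.strip():
            print('%-30s %r' % (token_type, value))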
import sys sys.path.append("..") # Adds higher directory to python modules path. import unittest, math from Laminate import Laminate class LaminateTestCase(unittest.TestCase): """ test cases for Laminate class """ laminate = None def setUp(self): kerf = 5.0/64 # usu. 1/8" for most fine cut blades rdus = 10 lengthDesign = 20 heightDesign = 7 self.laminate = Laminate('45/60A/45D', lengthDesign, heightDesign, rdus, kerf) def test_parseCmd(self): """ tests string command parsing for Laminates """ strCmd = '' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 0) strCmd = '-1' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 0) strCmd = '10/0A' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 0) strCmd = '90/10A' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 0) strCmd = '10A/45D' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 0) strCmd = '10D/45D' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 0) strCmd = '45/' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 0) strCmd = '/45' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 0) strCmd = '45' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 1) # ---------------------- strCmd = '45/60D/' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 0) strCmd = '/45/60D/' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 0) strCmd = '45/60F' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 0) strCmd = '45/60A' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 2) strCmd = '45/60D' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 2) # ---------------------- strCmd = '45/60TEST/30A' result = self.laminate.parseCmd(strCmd)['angle'] self.assertEqual(len(result), 0) strCmd = '45/60D/30A' result = self.laminate.parseCmd(strCmd) self.assertEqual(len(result['angle']), 3) self.assertEqual(result['angle'][0], math.radians(45)) self.assertEqual(result['angle'][1], math.radians(60)) self.assertEqual(result['angle'][2], math.radians(30)) self.assertEqual(result['location'][0], 'Any') self.assertEqual(result['location'][1], 'Descending') self.assertEqual(result['location'][2], 'Ascending') strCmd = '45/60D/30D' result = self.laminate.parseCmd(strCmd) self.assertEqual(len(result['angle']), 3) self.assertEqual(result['angle'][0], math.radians(45)) self.assertEqual(result['angle'][1], math.radians(60)) self.assertEqual(result['angle'][2], math.radians(30)) self.assertEqual(result['location'][0], 'Any') self.assertEqual(result['location'][1], 'Descending') self.assertEqual(result['location'][2], 'Descending') def test_getSinAnglesMultiplier(self): # test indices indexBegin = -1 indexEnd = 2 result = self.laminate.getSinAnglesMultiplier(indexBegin, indexEnd) self.assertEqual(result, -1) indexBegin = 0 indexEnd = -1 result = self.laminate.getSinAnglesMultiplier(indexBegin, indexEnd) self.assertEqual(result, -1) indexBegin = 2 indexEnd = 1 result = self.laminate.getSinAnglesMultiplier(indexBegin, indexEnd) self.assertEqual(result, -1) indexBegin = 0 indexEnd = 10 result = self.laminate.getSinAnglesMultiplier(indexBegin, indexEnd) self.assertEqual(result, -1) indexBegin = 0 indexEnd = 0 result = self.laminate.getSinAnglesMultiplier(indexBegin, indexEnd) 
self.assertEqual(result, 1.414213562373095) indexBegin = 0 indexEnd = 1 result = self.laminate.getSinAnglesMultiplier(indexBegin, indexEnd) self.assertEqual(result, 2.449489742783178) indexBegin = 0 indexEnd = 2 result = self.laminate.getSinAnglesMultiplier(indexBegin, indexEnd) self.assertEqual(result, 3.464101615137754) indexBegin = 1 indexEnd = 2 result = self.laminate.getSinAnglesMultiplier(indexBegin, indexEnd) self.assertEqual(result, 2.449489742783178) indexBegin = 1 indexEnd = 1 result = self.laminate.getSinAnglesMultiplier(indexBegin, indexEnd) self.assertEqual(result, 1.7320508075688772) """ test calculating cut width of original laminate from different generations rdus: 10, rdu width: 2", toFit: 20", kerf: 5/64", design pieces cut width: 1" """ def test_getBaseCutWidth(self): # testing 3rd generation : 45/60A/45D result = self.laminate.getBaseCutWidth() self.assertEqual(result, 0.48524817793679187) # testing 4th generation : 45/60A/45D/30A self.laminate.parseCmd('45/60A/45D/30A') result = self.laminate.getBaseCutWidth() self.assertEqual(result, 0.5171425756292812) # testing 2nd generation : 45/60A self.laminate.parseCmd('45/60A') result = self.laminate.getBaseCutWidth() self.assertEqual(result, 0.6224557589700653) # testing 1st generation : 45 self.laminate.parseCmd('45') result = self.laminate.getBaseCutWidth() self.assertEqual(result, 1.078125) # empty command string self.laminate.setEmptyCmd() result = self.laminate.getBaseCutWidth() self.assertEqual(result, -1) def test_getCutLength(self): result = self.laminate.getCutLength(0, self.laminate.cutCmd["angle"][0]) self.assertEqual(result, -1) result = self.laminate.getCutLength(5, math.radians(90)) self.assertEqual(result, -1) result = self.laminate.getCutLength(5, math.radians(0)) self.assertEqual(result, -1) result = self.laminate.getCutLength(5, self.laminate.cutCmd["angle"][0]) self.assertEqual(result, 7.181553246425874) def test_getNumberCuts(self): # testing 3rd generation : 45/60A/45D ; rdus: 10 result = self.laminate.getNumberCuts(3) self.assertEqual(result, 160) # testing 2nd generation : 45/60A; rdus: 10 result = self.laminate.getNumberCuts(2) self.assertEqual(result, 80) # testing 1st generation : 45; rdus: 10 result = self.laminate.getNumberCuts(1) self.assertEqual(result, 40) # testing 4th generation : 45/60A/45D/30A; rdus: 10 result = self.laminate.getNumberCuts(4) self.assertEqual(result, 320) # error generation empty cmd result = self.laminate.getNumberCuts(0) self.assertEqual(result, -1) def test_getPreviousGenerationHeight(self): cutLength = 0 angle = 0.5 heightDesign = 10 result = self.laminate.getPreviousGenerationHeight(cutLength, angle, heightDesign) self.assertEqual(result, -1) cutLength = 1 angle = 0 heightDesign = 10 result = self.laminate.getPreviousGenerationHeight(cutLength, angle, heightDesign) self.assertEqual(result, -1) cutLength = 1 angle = 0.5 heightDesign = 0 result = self.laminate.getPreviousGenerationHeight(cutLength, angle, heightDesign) self.assertEqual(result, -1) # angle: 60 degrees, cut length = 1.24491151794013 inches, height design = 7 inches angle = math.radians(60) cutLength = self.laminate.getCutLength(1, angle) heightDesign = 7 result = self.laminate.getPreviousGenerationHeight(cutLength, angle, heightDesign) self.assertEqual(result, 6.410003913447593) def test_setHeights(self): #print "::Test GetHeights()" self.laminate.setHeights() #print self.laminate.cutCmd["height"] self.assertEqual(self.laminate.cutCmd["height"], [4.830008258375506, 6.410003913447593, 7]) if __name__ == 
'__main__': unittest.main()
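# ---------------------------------------------------------------------------
# Illustrative sketch (not the Laminate implementation). The tests above imply
# a command grammar of '<degrees>[A|D]' segments joined by '/': the first
# segment is bare (location 'Any'), later segments need an 'A' (Ascending) or
# 'D' (Descending) suffix, and angles must lie strictly between 0 and 90
# degrees.  A standalone parser following that inferred convention might look
# like this; it is only a reading aid for the expected parseCmd behaviour.
# ---------------------------------------------------------------------------
import math
import re

def parse_cut_command(cmd):
    """Return {'angle': [radians...], 'location': [...]}; empty lists on error."""
    empty = {'angle': [], 'location': []}
    if not cmd:
        return empty
    result = {'angle': [], 'location': []}
    locations = {'A': 'Ascending', 'D': 'Descending'}
    for i, part in enumerate(cmd.split('/')):
        match = re.fullmatch(r'(\d+)([AD]?)', part)
        if not match:
            return empty
        degrees, suffix = int(match.group(1)), match.group(2)
        if not 0 < degrees < 90:
            return empty
        # First segment must be bare; later segments must carry A or D.
        if (i == 0 and suffix) or (i > 0 and not suffix):
            return empty
        result['angle'].append(math.radians(degrees))
        result['location'].append('Any' if i == 0 else locations[suffix])
    return result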
from discord import errors as discord_error from discord.ext.commands import HelpCommand from prettytable import PrettyTable from discord.ext import commands from .utils import utils import traceback import datetime import logging import discord class Events(commands.Cog): def __init__(self,bot): self.bot = bot self.redis = bot.db.redis self.error_log = False self.debug_cog = {} def Time(self): return datetime.datetime.now().strftime("%b/%d/%Y %H:%M:%S") ############################################################# # _ _ _ # # | | (_) | | # # | | _ ___ | |_ ___ _ __ ___ _ __ # # | | | | / __| | __| / _ \ | '_ \ / _ \ | '__| # # | |____ | | \__ \ | |_ | __/ | | | | | __/ | | # # |______| |_| |___/ \__| \___| |_| |_| \___| |_| # # # ############################################################# @commands.Cog.listener() async def on_guild_join(self,guild): #IF Bot join guild, it will add to record of those. print ("\033[96m<EVENT JOIN>: \033[94m {} :({}) -- {}\033[00m".format(self.Time(), guild.id, guild.name)) utils.prGreen("\t\t Servers: {}\t\t Members: {}".format(len(self.bot.guilds), len(self.bot.users))) await self.redis.hset("Info:Server",str(guild.id),str(guild.name)) await self.redis.set("Info:Total Server",len(self.bot.guilds)) await self.redis.set("Info:Total Member",len(self.bot.users)) if self.redis.exists("{}:Config:Cogs".format(guild.id)): # if it exist, and going to be expire soon, best to persists them, so they don't lost data async for key in self.redis.iscan(match="{}*".format(guild.id)): await self.redis.persist(key) else: await self.redis.set("{}:Config:CMD_Prefix".format(guild.id),"!") #Server setting await self.redis.hset("{}:Config:Delete_MSG".format(guild.id),"core","off") #just in case @commands.Cog.listener() async def on_guild_remove(self,guild): #IF bot left or no longer in that guild. It will remove this print("\033[91m<EVENT LEFT>:\033[94m[ {} : \033[96m({})\033[92m -- {}\033[00m".format(self.Time(), guild.id, guild.name)) utils.prGreen("\t\t Severs:{}\t\tMembers:{}".format(len(self.bot.guilds), len(self.bot.users))) await self.redis.hdel("Info:Server",guild.id) age = datetime.datetime.utcnow() - guild.me.joined_at if guild.me is not None else None #return None instead of giving error chuck not update? #Set all database to expire, will expire in 30 days, so This way, it can save some space,it would unto when it is back to that guild and setting changed. 
count = 0 async for key in self.redis.iscan(match="{}*".format(guild.id)): await self.redis.expire(key,1209600) count += 1 if age is not None: utils.prGreen("{0.days} day, {0.seconds} seconds".format(age)) utils.prGreen("Set {} expire".format(count)) @commands.Cog.listener() async def on_guild_update(self,before,after): #If guild update name and w/e, just in case, Update those print("\033[95m<EVENT Update>:\033[94m {} :\033[96m {} \033[93m | \033[92m({}) -- {}\033[00m".format(self.Time(),after.name,after.id, after)) if after.icon: await self.redis.hset("Info:Server_Icon",after.id,after.icon) await self.redis.hset("Info:Server",after.id,after.name) @commands.Cog.listener() async def on_member_join(self,member): print("\033[98m<Event Member Join>:\033[94m {} :\033[96m {} ||| \033[93m ({})\033[92m -- {} ||| {}\033[00m".format(self.Time(), member.guild.name, member.guild.id, member.name, member.id)) await self.redis.set("Info:Total Member",len(set(self.bot.get_all_members()))) @commands.Cog.listener() async def on_member_remove(self,member): print("\033[93m<Event Member Left>:\033[94m {}:\033[96m {} ||| \033[93m ({})\033[92m -- {} ||| {}\033[00m".format(self.Time(), member.guild.name, member.guild.id, member.name, member.id)) await self.redis.set("Info:Total Member",len(set(self.bot.get_all_members()))) @commands.Cog.listener() async def on_member_update(self,before,after): check = await self.redis.get("Member_Update:{}:check".format(after.id)) if check: #If it true, return, it haven't cool down yet return if before.avatar != after.avatar: if after.avatar is None: return print("\033[97m<Event Member Update Avatar>:\033[94m {} :\033[92m {} ||| {}\033[00m".format(self.Time(), after.name, after.id)) await self.redis.hset("Info:Icon",after.id,after.avatar) await self.redis.set("Member_Update:{}:check".format(after.id),'cooldown',expire=30) #To stop multi update if before.name != after.name: print("\033[97m<Event Member Update Name>: \033[94m {}:\033[93m Before : {} |||\033[92m After : {} ||| {}\033[00m".format(self.Time(),before.name,after.name, after.id)) await self.redis.hset("Info:Name",after.id,str(after)) await self.redis.set("Member_Update:{}:check".format(after.id),'cooldown',expire=30) #To stop multi update @commands.Cog.listener() async def on_command(self,ctx): if isinstance(ctx.message.channel,discord.DMChannel): return print("\033[96m<Event Command>\033[94m {0}:\033[96m {1.guild.name} ||| \033[93m {1.author} ||| \033[94m ({1.author.id})\033[92m ||| {1.clean_content}\033[00m".format(self.Time(), ctx.message)) @commands.Cog.listener() async def on_message(self,msg): if self.bot.user.id == msg.author.id: if isinstance(msg.channel,discord.DMChannel) is False: try: if self.bot.log_config.get(msg.guild.id): if str(msg.channel.id) in self.bot.log_config[msg.guild.id]['channel']: return except: pass if isinstance(msg.channel,discord.TextChannel) is False: if msg.author.id == self.bot.user.id and (msg.channel,discord.DMChannel) and msg.channel.recipient.id == 105853969175212032: return #no need to pm me. cuz it is likely an error. utils.prCyan("PRIVATE") utils.prGreen("<Event Send> {} : {} |||{}".format(self.Time(), msg.author.name, msg.clean_content)) else: try: if msg.embeds: table = PrettyTable() #best to use it i guess data = msg.embeds[0].fields if data: for x in data: table.add_column(x.name,x.value.split("\n")) content = str(msg.embeds[0].description) +"\n" if bool(table.field_names): #if there is actually contents inside, then we will add it to list. 
content +="\n" + table.get_string() else: content = msg.clean_content utils.prGreen("<Event Send> {} : {} ||| {} ||| ({}) ||| {}".format(self.Time(), msg.author.name,msg.guild.name,msg.guild.id, content)) except: utils.prGreen("<Event Send> {} : {} ||| {} ||| ({}) ||| {}".format(self.Time(), msg.author.name,msg.guild.name,msg.guild.id,msg.embeds)) @commands.Cog.listener() async def on_command_completion(self,ctx): if ctx.command.cog_name is None or isinstance(ctx.message.channel,discord.DMChannel): return try: print(ctx.command.cog_name) check = await self.bot.db.redis.hgetall("{}:Config:Delete_MSG".format(ctx.message.guild.id)) if check.get(ctx.command.cog_name.lower()) == "on": await ctx.message.delete() await self.redis.hincrby("{0.guild.id}:Total_Command:{0.author.id}".format(ctx.message),ctx.invoked_with, increment=1) await self.redis.hincrby("Info:Total_Command", ctx.invoked_with, increment=1) await self.redis.hincrby("{0.guild.id}:Total_Command:User:{0.author.id}".format(ctx.message),ctx.invoked_with, increment=1) except: utils.prRed("Failed to delete user command - {0.name} - {0.id}\n".format(ctx.message.guild)) utils.prRed(traceback.format_exc()) @commands.Cog.listener() async def on_command_error(self,ctx,error): if self.bot.user.id == 181503794532581376 or self.error_log: print(error) if isinstance(error, commands.MissingRequiredArgument): self.bot.help_command.context = ctx await self.bot.help_command.command_callback(ctx,command= str(ctx.command)) elif isinstance(error,commands.BadArgument): self.bot.help_command.context = ctx await self.bot.help_command.command_callback(ctx,command= str(ctx.command)) elif isinstance(error,commands.NoPrivateMessage): await ctx.author.send("This command cannot be used in private message") elif isinstance(error, commands.CommandInvokeError): if isinstance(error.original,discord_error.Forbidden): await ctx.send("I am sorry, I need a certain permission to run it...") traceback.print_exception(type(error), error, error.__traceback__) return utils.prRed(type(error.original)) errors = traceback.format_exception(type(error), error, error.__traceback__) Current_Time = datetime.datetime.utcnow().strftime("%b/%d/%Y %H:%M:%S UTC") utils.prRed(Current_Time) utils.prRed("Error!") traceback.print_exception(type(error), error, error.__traceback__) cog_error = '```fix\nCogs:{0.command.cog_name}\tCommand:{0.command}\tAuthor:{0.message.author}-{0.message.author.id}\n' \ 'Server:{0.message.guild.id}\n{0.message.clean_content}\nError:\n{1}```'.format(ctx,error) msg ="```py\n{}```\n{}\n```py\n{}\n```".format(Current_Time + "\n"+ "ERROR!",cog_error,"".join(errors).replace("`","")) if len(msg) >= 1900: msg = await utils.send_hastebin(msg) # await self.bot.owner.send(msg or "There is no message but error...") await self.bot.error_channel.send(msg or "There is no message but error...") await ctx.send("You either used the command incorrectly or an unexpected error occurred. A report has been sent to the creator so you can hope for a fix soon.") @commands.command(hidden = True) @commands.check(utils.is_owner) async def set_error(self,ctx,cog=None): """ On a prod guild, it can get very spamming, so I would set it for just in case... 
""" if cog: check = self.debug_cog.get(cog) if check or check is False: log = logging.getLogger("cogs.{}".format(cog)) if check == True: log.setLevel(logging.INFO) utils.prPurple("Getting info to paste into hastebin") with open("bot_log.txt","r+") as f: msg = await utils.send_hastebin(f.read()) await ctx.send(content = msg) else: log.setLevel(logging.DEBUG) self.debug_cog[cog] = not(check) await ctx.send("Set to {}".format(not(check))) else: log = logging.getLogger("cogs.{}".format(cog )) log.setLevel(logging.DEBUG) format_log = logging.Formatter('%(asctime)s:\t%(levelname)s:\t%(name)s:\tFunction:%(funcName)s ||| MSG: %(message)s') console = logging.StreamHandler() console.setFormatter(format_log) handler = logging.FileHandler(filename='bot_log.txt', encoding='utf-8', mode='w') handler.setFormatter(format_log) log.addHandler(console) log.addHandler(handler) self.debug_cog[cog] = True await ctx.send("Set to True") else: self.error_log = not (self.error_log) await ctx.send("Set {}".format(self.error_log)) def setup(bot): bot.add_cog(Events(bot))
#!/usr/bin/python '''This has been slightly modified from the original. I believe there was an error that caused the icons to become random noise. It is likely the fix only works for icons of exactly size 256x256 ''' # pyico # Copyright (C) 2009 Nikolai Ugelvik # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import sys import struct import math from PIL import Image class Icon(object): def __init__(self, image_paths=None, output_path=None): self.image_paths = [] if image_paths: self.image_paths = image_paths self.output_path = None if output_path: self.output_path = output_path self._ico_data = "" self._img_data = "" # Conversion info self._convert_rgb = False def convert_rgb(self, boolean): self._convert_rgb = boolean def getdata(self): return self._ico_data + self._img_data def save(self): if not self._ico_data or not self._img_data: self._build() if self.output_path: with open(self.output_path, 'wb') as f: f.write(self._ico_data) f.write(self._img_data) else: raise Exception("Missing output path.") def __load_image(self, image_path): img = Image.open(image_path) img.load() # fix for a bug in PIL 1.1.7 if self._convert_rgb: if 'A' in img.mode or 'transparency' in img.info: img = img.convert('RGBA') else: img = img.convert('RGB') if img.mode == 'RGB': imgdata = img.tobytes('raw', 'BGR', 0, -1) elif img.mode == 'RGBA': r, g, b, a = img.split() img = Image.merge('RGBA', (b, g, r, a)) imgdata = img.tobytes('raw', 'RGBA', 0, -1) elif img.mode == 'P': imgdata = img.tobytes('raw', 'P', 0, -1) return (img, imgdata) def _build(self): self._ico_data = "" self._img_data = b"" if not len(self.image_paths): raise Exception("No images added") num_images = len(self.image_paths) self._ico_data = self._generate_header(num_images) # Size of all the headers (image headers + file header) dataoffset = struct.calcsize('BBBBHHII') * num_images + \ struct.calcsize('HHH') for image in self.image_paths: icondirentry, imgdata, dataoffset = self._generate_icondirentry( image, dataoffset) self._ico_data += icondirentry self._img_data += imgdata if self.output_path: self.save() def _calcstride(self, width_in_bits): ## length = (width_in_bits + 31) // 32 ## return length * 4 return width_in_bits*4 def _generate_header(self, num_images): return struct.pack('HHH', 0, 1, num_images) def _generate_icondirentry(self, image_path, dataoffset): img, imgdata = self.__load_image(image_path) bWidth = img.size[0] bHeight = img.size[1] bReserved = 0 wPlanes = 0 # Bit count if img.mode == 'RGB': wBitCount = 24 bColorCount = 0 elif img.mode == 'RGBA': wBitCount = 32 bColorCount = 0 elif img.mode == 'P': wBitCount = 8 bColorCount = len(img.palette.getdata()[1]) // 3 print(bColorCount) dwImageOffset = dataoffset # Num bytes in image section length = len(imgdata) + self._calcstride(img.size[0]) * img.size[1] # Generate bitmapinfoheader and prepend this to the pixel data 
bmpinfoheader = self._generate_bitmapinfoheader(bWidth, bHeight, wPlanes, wBitCount, length, bColorCount) data = bmpinfoheader if img.mode == 'RGB' or img.mode == 'RGBA': # XOR mask (Image) data += imgdata palette_alpha = False elif img.mode == 'P': # Write the palette palette_data = img.palette.getdata() if palette_data[0] == 'RGB;L' or palette_data[0] == 'RGB': palette_alpha = False for x in range(0, len(palette_data[1]), 3): data += palette_data[1][x + 2] # B data += palette_data[1][x + 1] # G data += palette_data[1][x] # R data += struct.pack('B', 0) data += imgdata elif palette_data[0] == 'RGBA;L' or palette_data[0] == 'RGBA': palette_alpha = True #for x in range(4): data += struct.pack('B', 0) for x in range(0, len(palette_data[1]), 3): data += palette_data[1][x + 2] # B data += palette_data[1][x + 1] # G data += palette_data[1][x] # R data += struct.pack('B', 0) for byte in imgdata: if ord(byte) == 0: data += struct.pack('B', 0) else: data += struct.pack('B', ord(byte)) # AND mask (Transparency) if not palette_alpha: rowstride = self._calcstride(img.size[0]) ## print("rowstride", rowstride) data += struct.pack('B', 0) * (rowstride * img.size[1]) else: rowstride = self._calcstride(img.size[0]) ## print("rowstride", rowstride) bytes = [0 for x in range(rowstride * img.size[1])] for y in range(img.size[1] - 1, -1, -1): for x in range(img.size[0]): i = (y * rowstride + x // 8) if img.getpixel((x, y)) == 0: bytes[i] = bytes[i] | 2**(7 - x % 8) for y in range(img.size[1] - 1, -1, -1): for x in range(rowstride): data += struct.pack('B', bytes[y * rowstride + x]) # Increment the data offset pointer dataoffset += len(data) # Size of the dir entry + image data dwBytesInRes = len(data) # Pack the icondirentry header ## print bWidth, bHeight, bColorCount, bReserved,\ ## wPlanes, wBitCount,\ ## dwBytesInRes, dwImageOffset icondirentry = struct.pack('BBBBHHII', bWidth%256, bHeight%256, bColorCount, bReserved, wPlanes, wBitCount, dwBytesInRes, dwImageOffset) return icondirentry, data, dataoffset def _generate_bitmapinfoheader(self, width, height, planes, bit_count, size_image, colors_used): # BitmapInfoHeader biSize = struct.calcsize('IIIHHIIiiII') biWidth = width biHeight = height * 2 # Include the mask height biPlanes = 1 # Must be 1 biBitCount = bit_count biCompression = 0 biSizeImage = size_image biXPelsPerMeter = 0 biYPelsPerMeter = 0 biClrUsed = colors_used biClrImportant = 0 return struct.pack('IIIHHIIiiII', biSize, biWidth, biHeight, biPlanes, biBitCount, biCompression, biSizeImage, biXPelsPerMeter, biYPelsPerMeter, biClrUsed, biClrImportant) if __name__ == '__main__': import sys from optparse import OptionParser usage = "usage: %prog [options] file1 file2 ..." parser = OptionParser(usage=usage) parser.add_option("-o", "--output", dest="output_file", help="Write the icon to this file") parser.add_option("-c", "--convert-rgb", dest="convert_rgb", action="store_true", default=False, help="Convert images to RGB or RGBA (if the image has alpha)" " format before writing.") (options, args) = parser.parse_args() if len(args) == 0: parser.print_help() sys.stderr.write("\nNo input images specified. Exiting.\n") sys.exit(1) if not options.output_file: sys.stderr.write("\nNo output file specified. Exiting.\n") sys.exit(2) ico = Icon(args, options.output_file) if options.convert_rgb: ico.convert_rgb(True) ico.save() print("Successfully wrote icon to %s." % options.output_file)
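# ---------------------------------------------------------------------------
# Illustrative helper (not part of the original pyico module): a tiny reader
# for the headers this module writes, handy for sanity-checking output.  It
# only unpacks the ICONDIR ('HHH') and ICONDIRENTRY ('BBBBHHII') structures
# used above; it does not decode the bitmap data itself.
# ---------------------------------------------------------------------------
import struct

def read_ico_headers(path):
    """Return a list of dicts describing each image entry in an .ico file."""
    with open(path, 'rb') as f:
        reserved, ico_type, count = struct.unpack('HHH', f.read(6))
        entries = []
        for _ in range(count):
            (width, height, colors, _reserved, planes, bit_count,
             bytes_in_res, image_offset) = struct.unpack('BBBBHHII', f.read(16))
            entries.append({'width': width or 256,    # 0 encodes 256 in ICO
                            'height': height or 256,
                            'bit_count': bit_count,
                            'bytes_in_res': bytes_in_res,
                            'offset': image_offset})
    return entries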
# # Copyright (c) 2014, Arista Networks, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # Neither the name of Arista Networks nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR # BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE # OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN # IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # """Module for working with EOS access control list resources. This module provides an implementation for configuring and managing access control lists on Arista EOS nodes. Access control lists can be specified as either 'standard' or 'extended' ACLs. This module provides the following class implementations: * Acls -- The top-level class used to manage both standard and extended access control lists in EOS * StandardAcls -- Class that manages the set of standard ACLs * ExtendedAcls -- Class that manages the set of extended ACLs """ import re import netaddr from pyeapi.api import EntityCollection from pyeapi.utils import ProxyCall VALID_ACLS = frozenset(['standard', 'extended']) def mask_to_prefixlen(mask): """Converts a subnet mask from dotted decimal to bit length Args: mask (str): The dotted decimal subnet mask to convert Returns: int: The prefix length equivalent of the subnet mask """ mask = mask or '255.255.255.255' return netaddr.IPAddress(mask).netmask_bits() def prefixlen_to_mask(prefixlen): """Converts a prefix length to a dotted decimal subnet mask Args: prefixlen (str): The prefix length value to convert Returns: str: The subnet mask as a dotted decimal string """ prefixlen = prefixlen or '32' addr = '0.0.0.0/%s' % prefixlen return str(netaddr.IPNetwork(addr).netmask) class Acls(EntityCollection): def __init__(self, node, *args, **kwargs): super(Acls, self).__init__(node, *args, **kwargs) self._instances = dict() def get(self, name): return self.get_instance(name)[name] def getall(self): """Returns all ACLs in a dict object.
Returns: A Python dictionary object containing all ACL configuration indexed by ACL name:: { "<ACL1 name>": {...}, "<ACL2 name>": {...} } """ acl_re = re.compile(r'^ip access-list (?:(standard) )?(.+)$', re.M) response = {'standard': {}, 'extended': {}} for acl_type, name in acl_re.findall(self.config): acl = self.get(name) if acl_type and acl_type == 'standard': response['standard'][name] = acl else: response['extended'][name] = acl return response def __getattr__(self, name): return ProxyCall(self.marshall, name) def marshall(self, name, *args, **kwargs): acl_name = args[0] acl_instance = self.get_instance(acl_name) if not hasattr(acl_instance, name): raise AttributeError("'%s' object has no attribute '%s'" % (acl_instance, name)) method = getattr(acl_instance, name) return method(*args, **kwargs) def get_instance(self, name): if name in self._instances: return self._instances[name] acl_re = re.compile(r'^ip access-list (?:(standard) )?(%s)$' % name, re.M) match = acl_re.search(self.config) if match: acl_type = match.group(1) or 'extended' return self.create_instance(match.group(2), acl_type) return {name: None} def create_instance(self, name, acl_type): if acl_type not in VALID_ACLS: acl_type = 'standard' acl_instance = ACL_CLASS_MAP.get(acl_type) self._instances[name] = acl_instance(self.node) return self._instances[name] def create(self, name, type='standard'): # Create ACL instance for ACL type Standard or Extended then call # create method for specific ACL class. acl_instance = self.create_instance(name, type) return acl_instance.create(name) class StandardAcls(EntityCollection): entry_re = re.compile(r'(\d+)' r'(?: ([p|d]\w+))' r'(?: (any))?' r'(?: (host))?' r'(?: ([0-9]+(?:\.[0-9]+){3}))?' r'(?:/([0-9]{1,2}))?' r'(?: ([0-9]+(?:\.[0-9]+){3}))?' 
r'(?: (log))?') def get(self, name): config = self.get_block('ip access-list standard %s' % name) if not config: return None resource = dict(name=name, type='standard') resource.update(self._parse_entries(config)) return resource def _parse_entries(self, config): entries = dict() for item in re.finditer(r'\d+ [p|d].*$', config, re.M): match = self.entry_re.match(item.group(0)) (seq, act, anyip, host, ip, mlen, mask, log) = match.groups() entry = dict() entry['action'] = act entry['srcaddr'] = ip or '0.0.0.0' entry['srclen'] = mlen or mask_to_prefixlen(mask) entry['log'] = log is not None entries[seq] = entry return dict(entries=entries) def create(self, name): return self.configure('ip access-list standard %s' % name) def delete(self, name): return self.configure('no ip access-list standard %s' % name) def default(self, name): return self.configure('default ip access-list standard %s' % name) def update_entry(self, name, seqno, action, addr, prefixlen, log=False): cmds = ['ip access-list standard %s' % name] cmds.append('no %s' % seqno) entry = '%s %s %s/%s' % (seqno, action, addr, prefixlen) if log: entry += ' log' cmds.append(entry) cmds.append('exit') return self.configure(cmds) def add_entry(self, name, action, addr, prefixlen, log=False, seqno=None): cmds = ['ip access-list standard %s' % name] entry = '%s %s/%s' % (action, addr, prefixlen) if seqno is not None: entry = '%s %s' % (seqno, entry) if log: entry += ' log' cmds.append(entry) cmds.append('exit') return self.configure(cmds) def remove_entry(self, name, seqno): cmds = ['ip access-list standard %s' % name, 'no %s' % seqno, 'exit'] return self.configure(cmds) class ExtendedAcls(EntityCollection): entry_re = re.compile(r'(\d+)' r'(?: ([p|d]\w+))' r'(?: (\w+|\d+))' r'(?: ([a|h]\w+))?' r'(?: ([0-9]+(?:\.[0-9]+){3}))?' r'(?:/([0-9]{1,2}))?' r'(?: ((?:eq|gt|lt|neq|range) [\w-]+))?' r'(?: ([a|h]\w+))?' r'(?: ([0-9]+(?:\.[0-9]+){3}))?' r'(?:/([0-9]{1,2}))?' r'(?: ([0-9]+(?:\.[0-9]+){3}))?' r'(?: ((?:eq|gt|lt|neq|range) [\w-]+))?' 
r'(?: (.+))?') def get(self, name): config = self.get_block('ip access-list %s' % name) if not config: return None resource = dict(name=name, type='extended') resource.update(self._parse_entries(config)) return resource def _parse_entries(self, config): entries = dict() for item in re.finditer(r'\d+ [p|d].*$', config, re.M): match = self.entry_re.match(item.group(0)) if match: entry = dict() entry['action'] = match.group(2) entry['protocol'] = match.group(3) entry['srcaddr'] = match.group(5) or 'any' entry['srclen'] = match.group(6) entry['srcport'] = match.group(7) entry['dstaddr'] = match.group(9) or 'any' entry['dstlen'] = match.group(10) entry['dstport'] = match.group(12) entry['other'] = match.group(13) entries[match.group(1)] = entry return dict(entries=entries) def create(self, name): return self.configure('ip access-list %s' % name) def delete(self, name): return self.configure('no ip access-list %s' % name) def default(self, name): return self.configure('default ip access-list %s' % name) def update_entry(self, name, seqno, action, protocol, srcaddr, srcprefixlen, dstaddr, dstprefixlen, log=False): cmds = ['ip access-list %s' % name] cmds.append('no %s' % seqno) entry = '%s %s %s %s/%s %s/%s' % (seqno, action, protocol, srcaddr, srcprefixlen, dstaddr, dstprefixlen) if log: entry += ' log' cmds.append(entry) cmds.append('exit') return self.configure(cmds) def add_entry(self, name, action, protocol, srcaddr, srcprefixlen, dstaddr, dstprefixlen, log=False, seqno=None): cmds = ['ip access-list %s' % name] entry = '%s %s %s/%s %s/%s' % (action, protocol, srcaddr, srcprefixlen, dstaddr, dstprefixlen) if seqno is not None: entry = '%s %s' % (seqno, entry) if log: entry += ' log' cmds.append(entry) cmds.append('exit') return self.configure(cmds) def remove_entry(self, name, seqno): cmds = ['ip access-list %s' % name, 'no %s' % seqno, 'exit'] return self.configure(cmds) ACL_CLASS_MAP = {'standard': StandardAcls, 'extended': ExtendedAcls} def instance(node): return Acls(node)
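# ---------------------------------------------------------------------------
# Illustrative usage sketch (not from this module): how the classes above are
# typically driven through a pyeapi node.  The connection name is a
# placeholder from ~/.eapi.conf, and loading the module via node.api('acl')
# follows the usual pyeapi convention rather than anything shown in this file.
# ---------------------------------------------------------------------------
def _example_acl_usage():
    import pyeapi  # assumption: an EOS node reachable over eAPI
    node = pyeapi.connect_to('veos01')   # placeholder profile name
    acls = node.api('acl')               # the Acls collection defined above
    acls.create('test-std', type='standard')
    # Entry calls are proxied through Acls.marshall to the StandardAcls class.
    acls.add_entry('test-std', 'permit', '10.10.10.0', '24', log=True, seqno=10)
    acls.update_entry('test-std', '10', 'deny', '10.10.10.0', '24')
    return acls.getall()                 # {'standard': {...}, 'extended': {...}}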
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.contrib.kfac.fisher_blocks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.kfac.python.ops import fisher_blocks as fb from tensorflow.contrib.kfac.python.ops import layer_collection as lc from tensorflow.contrib.kfac.python.ops import utils from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables as tf_variables from tensorflow.python.platform import test def _make_psd(dim): """Constructs a PSD matrix of the given dimension.""" mat = np.ones((dim, dim), dtype=np.float32) mat[np.arange(dim), np.arange(dim)] = 2. + np.arange(dim) return array_ops.constant(mat) class UtilsTest(test.TestCase): def testComputePiTracenorm(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) left_factor = array_ops.diag([1., 2., 0., 1.]) right_factor = array_ops.ones([2., 2.]) # pi is the sqrt of the left trace norm divided by the right trace norm pi = fb._compute_pi_tracenorm(left_factor, right_factor) pi_val = sess.run(pi) self.assertEqual(1., pi_val) class FullFBTest(test.TestCase): def testFullFBInitSingleTensor(self): with ops.Graph().as_default(): random_seed.set_random_seed(200) params = (array_ops.constant([1., 2.]), array_ops.constant(3.)) block = fb.FullFB(lc.LayerCollection(), params) block.register_additional_minibatch(32) self.assertAllEqual(params, block.tensors_to_compute_grads()) def testFullFBInitTensorTuple(self): with ops.Graph().as_default(): random_seed.set_random_seed(200) params = (array_ops.constant([1., 2.]), array_ops.constant(3.)) block = fb.FullFB(lc.LayerCollection(), params) block.register_additional_minibatch(32) self.assertAllEqual(params, block.tensors_to_compute_grads()) def testInstantiateFactors(self): with ops.Graph().as_default(): random_seed.set_random_seed(200) params = (array_ops.constant([1., 2.]), array_ops.constant(3.)) block = fb.FullFB(lc.LayerCollection(), params) block.register_additional_minibatch(32) grads = (params[0]**2, math_ops.sqrt(params[1])) block.instantiate_factors(grads, 0.5) def testMultiplyInverseTuple(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) params = (array_ops.constant([1., 2.]), array_ops.constant(3.)) block = fb.FullFB(lc.LayerCollection(), params) block.register_additional_minibatch(32) grads = (params[0]**2, math_ops.sqrt(params[1])) block.instantiate_factors((grads,), 0.5) # Make sure our inverse is something other than the identity. 
sess.run(tf_variables.global_variables_initializer()) sess.run(block._factor.make_inverse_update_ops()) vector = array_ops.ones(3,) * 2 output = block.multiply_inverse(vector) self.assertAllClose(sess.run(vector * 2 / 3.), sess.run(output)) def testMultiplyInverseNotTuple(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) params = array_ops.constant([[1.], [2.]]) block = fb.FullFB(lc.LayerCollection(), params) block.register_additional_minibatch(32) grads = params**2 block.instantiate_factors((grads,), 0.5) # Make sure our inverse is something other than the identity. sess.run(tf_variables.global_variables_initializer()) sess.run(block._factor.make_inverse_update_ops()) vector = array_ops.ones(2,) * 2 output = block.multiply_inverse(vector) self.assertAllClose(sess.run(vector * 2 / 3.), sess.run(output)) def testMultiplyInverseAgainstExplicit(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) params = (array_ops.constant([1., 2.]), array_ops.constant(3.)) block = fb.FullFB(lc.LayerCollection(), params) block.register_additional_minibatch(32) grads = (array_ops.constant([2., 3.]), array_ops.constant(4.)) damping = 0.5 block.instantiate_factors((grads,), damping) # Make sure our inverse is something other than the identity. sess.run(state_ops.assign(block._factor._cov, _make_psd(3))) sess.run(block._factor.make_inverse_update_ops()) v_flat = np.array([4., 5., 6.], dtype=np.float32) vector = utils.column_to_tensors(params, array_ops.constant(v_flat)) output = block.multiply_inverse(vector) output_flat = sess.run(utils.tensors_to_column(output)).ravel() full = sess.run(block.full_fisher_block()) explicit = np.dot(np.linalg.inv(full + damping * np.eye(3)), v_flat) self.assertAllClose(output_flat, explicit) class NaiveDiagonalFBTest(test.TestCase): def testNaiveDiagonalFBInitSingleTensor(self): with ops.Graph().as_default(): random_seed.set_random_seed(200) params = (array_ops.constant([1., 2.]), array_ops.constant(3.)) block = fb.NaiveDiagonalFB(lc.LayerCollection(), params) block.register_additional_minibatch(32) self.assertAllEqual(params, block.tensors_to_compute_grads()) def testNaiveDiagonalFBInitTensorTuple(self): with ops.Graph().as_default(): random_seed.set_random_seed(200) params = (array_ops.constant([1., 2.]), array_ops.constant(3.)) block = fb.NaiveDiagonalFB(lc.LayerCollection(), params) block.register_additional_minibatch(32) self.assertAllEqual(params, block.tensors_to_compute_grads()) def testInstantiateFactors(self): with ops.Graph().as_default(): random_seed.set_random_seed(200) params = (array_ops.constant([1., 2.]), array_ops.constant(3.)) block = fb.NaiveDiagonalFB(lc.LayerCollection(), params) block.register_additional_minibatch(32) grads = (params[0]**2, math_ops.sqrt(params[1])) block.instantiate_factors(grads, 0.5) def testMultiplyInverseTuple(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) params = (array_ops.constant([1., 2.]), array_ops.constant(3.)) block = fb.NaiveDiagonalFB(lc.LayerCollection(), params) block.register_additional_minibatch(32) grads = (params[0]**2, math_ops.sqrt(params[1])) block.instantiate_factors((grads,), 0.5) # Make sure our inverse is something other than the identity. 
sess.run(tf_variables.global_variables_initializer()) sess.run(block._factor.make_inverse_update_ops()) vector = array_ops.ones(3,) * 2 output = block.multiply_inverse(vector) self.assertAllClose(sess.run(vector * 2 / 3.), sess.run(output)) def testMultiplyInverseNotTuple(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) params = array_ops.constant([[1.], [2.]]) block = fb.NaiveDiagonalFB(lc.LayerCollection(), params) block.register_additional_minibatch(32) grads = params**2 block.instantiate_factors((grads,), 0.5) # Make sure our inverse is something other than the identity. sess.run(tf_variables.global_variables_initializer()) sess.run(block._factor.make_inverse_update_ops()) vector = array_ops.ones(2,) * 2 output = block.multiply_inverse(vector) self.assertAllClose(sess.run(vector * 2 / 3.), sess.run(output)) def testMultiplyInverseAgainstExplicit(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) params = (array_ops.constant([1., 2.]), array_ops.constant(3.)) block = fb.NaiveDiagonalFB(lc.LayerCollection(), params) block.register_additional_minibatch(32) grads = (params[0]**2, math_ops.sqrt(params[1])) damping = 0.5 block.instantiate_factors((grads,), damping) cov = array_ops.reshape(array_ops.constant([2., 3., 4.]), [-1, 1]) sess.run(state_ops.assign(block._factor._cov, cov)) sess.run(block._factor.make_inverse_update_ops()) v_flat = np.array([4., 5., 6.], dtype=np.float32) vector = utils.column_to_tensors(params, array_ops.constant(v_flat)) output = block.multiply_inverse(vector) output_flat = sess.run(utils.tensors_to_column(output)).ravel() full = sess.run(block.full_fisher_block()) explicit = np.dot(np.linalg.inv(full + damping * np.eye(3)), v_flat) self.assertAllClose(output_flat, explicit) class FullyConnectedDiagonalFB(test.TestCase): def setUp(self): super(FullyConnectedDiagonalFB, self).setUp() self.batch_size = 4 self.input_size = 6 self.output_size = 3 self.inputs = np.random.randn(self.batch_size, self.input_size).astype( np.float32) self.outputs = np.zeros([self.batch_size, self.output_size]).astype( np.float32) self.output_grads = np.random.randn(self.batch_size, self.output_size).astype(np.float32) self.w = np.random.randn(self.input_size, self.output_size).astype( np.float32) self.b = np.random.randn(self.output_size).astype(np.float32) def fisherApprox(self, has_bias=False): """Fisher approximation using default inputs.""" if has_bias: inputs = np.concatenate( [self.inputs, np.ones([self.batch_size, 1])], axis=1) else: inputs = self.inputs return self.buildDiagonalFisherApproximation(inputs, self.output_grads) def buildDiagonalFisherApproximation(self, inputs, output_grads): """Builds explicit diagonal Fisher approximation. Fisher's diagonal is (d loss / d w)'s elements squared for d/dw = E[outer(input, output_grad)] where the expectation is taken over examples. Args: inputs: np.array of shape [batch_size, input_size]. output_grads: np.array of shape [batch_size, output_size]. Returns: Diagonal np.array of shape [num_params, num_params] for num_params = input_size * output_size. 
""" batch_size = inputs.shape[0] assert output_grads.shape[0] == batch_size input_size = inputs.shape[1] output_size = output_grads.shape[1] fisher_diag = np.zeros((input_size, output_size)) for i in range(batch_size): fisher_diag += np.square(np.outer(inputs[i], output_grads[i])) return np.diag(fisher_diag.flatten()) / batch_size def testMultiply(self): result, _ = self.runFisherBlockOps(self.w, [self.inputs], [self.outputs], [self.output_grads]) # Construct Fisher-vector product. expected_result = self.fisherApprox().dot(self.w.flatten()) expected_result = expected_result.reshape( [self.input_size, self.output_size]) self.assertAllClose(expected_result, result) def testMultiplyInverse(self): _, result = self.runFisherBlockOps(self.w, [self.inputs], [self.outputs], [self.output_grads]) # Construct inverse Fisher-vector product. expected_result = np.linalg.inv(self.fisherApprox()).dot(self.w.flatten()) expected_result = expected_result.reshape( [self.input_size, self.output_size]) self.assertAllClose(expected_result, result) def testRegisterAdditionalMinibatch(self): """Ensure 1 big minibatch and 2 small minibatches are equivalent.""" multiply_result_big, multiply_inverse_result_big = self.runFisherBlockOps( self.w, [self.inputs], [self.outputs], [self.output_grads]) multiply_result_small, multiply_inverse_result_small = ( self.runFisherBlockOps(self.w, np.split(self.inputs, 2), np.split(self.outputs, 2), np.split(self.output_grads, 2))) self.assertAllClose(multiply_result_big, multiply_result_small) self.assertAllClose(multiply_inverse_result_big, multiply_inverse_result_small) def testMultiplyHasBias(self): result, _ = self.runFisherBlockOps((self.w, self.b), [self.inputs], [self.outputs], [self.output_grads]) expected_result = self.fisherApprox(True).dot( np.concatenate([self.w.flatten(), self.b.flatten()])) expected_result = expected_result.reshape( [self.input_size + 1, self.output_size]) expected_result = (expected_result[:-1], expected_result[-1]) self.assertEqual(len(result), 2) self.assertAllClose(expected_result[0], result[0]) self.assertAllClose(expected_result[1], result[1]) def runFisherBlockOps(self, params, inputs, outputs, output_grads): """Run Ops guaranteed by FisherBlock interface. Args: params: Tensor or 2-tuple of Tensors. Represents weights or weights and bias of this layer. inputs: list of Tensors of shape [batch_size, input_size]. Inputs to layer. outputs: list of Tensors of shape [batch_size, output_size]. Preactivations produced by layer. output_grads: list of Tensors of shape [batch_size, output_size]. Gradient of loss with respect to 'outputs'. 
Returns: multiply_result: Result of FisherBlock.multiply(params) multiply_inverse_result: Result of FisherBlock.multiply_inverse(params) """ with ops.Graph().as_default(), self.test_session() as sess: inputs = as_tensors(inputs) outputs = as_tensors(outputs) output_grads = as_tensors(output_grads) params = as_tensors(params) block = fb.FullyConnectedDiagonalFB( lc.LayerCollection(), has_bias=isinstance(params, (tuple, list))) for (i, o) in zip(inputs, outputs): block.register_additional_minibatch(i, o) block.instantiate_factors((output_grads,), damping=0.0) sess.run(tf_variables.global_variables_initializer()) sess.run(block._factor.make_covariance_update_op(0.0)) multiply_result = sess.run(block.multiply(params)) multiply_inverse_result = sess.run(block.multiply_inverse(params)) return multiply_result, multiply_inverse_result class FullyConnectedKFACBasicFBTest(test.TestCase): def testFullyConnectedKFACBasicFBInit(self): with ops.Graph().as_default(): random_seed.set_random_seed(200) inputs = array_ops.constant([1., 2.]) outputs = array_ops.constant([3., 4.]) block = fb.FullyConnectedKFACBasicFB(lc.LayerCollection()) block.register_additional_minibatch(inputs, outputs) self.assertAllEqual([outputs], block.tensors_to_compute_grads()) def testInstantiateFactorsHasBias(self): with ops.Graph().as_default(): random_seed.set_random_seed(200) inputs = array_ops.constant([[1., 2.], [3., 4.]]) outputs = array_ops.constant([[3., 4.], [5., 6.]]) block = fb.FullyConnectedKFACBasicFB(lc.LayerCollection(), has_bias=True) block.register_additional_minibatch(inputs, outputs) grads = outputs**2 block.instantiate_factors(([grads],), 0.5) def testInstantiateFactorsNoBias(self): with ops.Graph().as_default(): random_seed.set_random_seed(200) inputs = array_ops.constant([[1., 2.], [3., 4.]]) outputs = array_ops.constant([[3., 4.], [5., 6.]]) block = fb.FullyConnectedKFACBasicFB(lc.LayerCollection(), has_bias=False) block.register_additional_minibatch(inputs, outputs) grads = outputs**2 block.instantiate_factors(([grads],), 0.5) def testMultiplyInverseTuple(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) inputs = array_ops.constant([[1., 2., 3.], [3., 4., 5.], [5., 6., 7.]]) outputs = array_ops.constant([[3., 4.], [5., 6.]]) block = fb.FullyConnectedKFACBasicFB(lc.LayerCollection(), has_bias=False) block.register_additional_minibatch(inputs, outputs) grads = outputs**2 block.instantiate_factors(([grads],), 0.5) # Make sure our inverse is something other than the identity. 
sess.run(tf_variables.global_variables_initializer()) sess.run(block._input_factor.make_inverse_update_ops()) sess.run(block._output_factor.make_inverse_update_ops()) vector = ( np.arange(2, 6).reshape(2, 2).astype(np.float32), # np.arange(1, 3).reshape(2, 1).astype(np.float32)) output = block.multiply_inverse((array_ops.constant(vector[0]), array_ops.constant(vector[1]))) output = sess.run(output) self.assertAllClose([[0.686291, 1.029437], [1.372583, 1.715729]], output[0]) self.assertAllClose([0.343146, 0.686291], output[1]) def testMultiplyInverseNotTuple(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) inputs = array_ops.constant([[1., 2.], [3., 4.]]) outputs = array_ops.constant([[3., 4.], [5., 6.]]) block = fb.FullyConnectedKFACBasicFB(lc.LayerCollection(), has_bias=False) block.register_additional_minibatch(inputs, outputs) grads = outputs**2 block.instantiate_factors(([grads],), 0.5) # Make sure our inverse is something other than the identity. sess.run(tf_variables.global_variables_initializer()) sess.run(block._input_factor.make_inverse_update_ops()) sess.run(block._output_factor.make_inverse_update_ops()) vector = np.arange(2, 6).reshape(2, 2).astype(np.float32) output = block.multiply_inverse(array_ops.constant(vector)) self.assertAllClose([[0.686291, 1.029437], [1.372583, 1.715729]], sess.run(output)) def testMultiplyInverseAgainstExplicit(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) input_dim, output_dim = 3, 2 inputs = array_ops.zeros([32, input_dim]) outputs = array_ops.zeros([32, output_dim]) params = array_ops.zeros([input_dim, output_dim]) block = fb.FullyConnectedKFACBasicFB(lc.LayerCollection(), has_bias=False) block.register_additional_minibatch(inputs, outputs) grads = outputs**2 damping = 0. # This test is only valid without damping. 
block.instantiate_factors(([grads],), damping) sess.run(state_ops.assign(block._input_factor._cov, _make_psd(3))) sess.run(state_ops.assign(block._output_factor._cov, _make_psd(2))) sess.run(block._input_factor.make_inverse_update_ops()) sess.run(block._output_factor.make_inverse_update_ops()) v_flat = np.arange(6, dtype=np.float32) vector = utils.column_to_tensors(params, array_ops.constant(v_flat)) output = block.multiply_inverse(vector) output_flat = sess.run(utils.tensors_to_column(output)).ravel() full = sess.run(block.full_fisher_block()) explicit = np.dot(np.linalg.inv(full + damping * np.eye(6)), v_flat) self.assertAllClose(output_flat, explicit) class ConvDiagonalFBTest(test.TestCase): def setUp(self): super(ConvDiagonalFBTest, self).setUp() self.batch_size = 2 self.height = 8 self.width = 4 self.input_channels = 6 self.output_channels = 3 self.kernel_size = 1 self.inputs = np.random.randn(self.batch_size, self.height, self.width, self.input_channels).astype(np.float32) self.outputs = np.zeros( [self.batch_size, self.height, self.width, self.output_channels]).astype(np.float32) self.output_grads = np.random.randn( self.batch_size, self.height, self.width, self.output_channels).astype( np.float32) self.w = np.random.randn(self.kernel_size, self.kernel_size, self.input_channels, self.output_channels).astype( np.float32) self.b = np.random.randn(self.output_channels).astype(np.float32) def fisherApprox(self, has_bias=False): """Fisher approximation using default inputs.""" if has_bias: inputs = np.concatenate( [self.inputs, np.ones([self.batch_size, self.height, self.width, 1])], axis=-1) else: inputs = self.inputs return self.buildDiagonalFisherApproximation(inputs, self.output_grads, self.kernel_size) def buildDiagonalFisherApproximation(self, inputs, output_grads, kernel_size): r"""Builds explicit diagonal Fisher approximation. Fisher's diagonal is (d loss / d w)'s elements squared for d/dw = E[\sum_{loc} outer(input_{loc}, output_grad_{loc})] where the expectation is taken over examples and the sum over (x, y) locations upon which the convolution is applied. Args: inputs: np.array of shape [batch_size, height, width, input_channels]. output_grads: np.array of shape [batch_size, height, width, output_channels]. kernel_size: int. height and width of kernel. Returns: Diagonal np.array of shape [num_params, num_params] for num_params = kernel_size^2 * input_channels * output_channels. """ batch_size, height, width, input_channels = inputs.shape assert output_grads.shape[0] == batch_size assert output_grads.shape[1] == height assert output_grads.shape[2] == width output_channels = output_grads.shape[3] # If kernel_size == 1, then we don't need to worry about capturing context # around the pixel upon which a convolution is applied. This makes testing # easier. assert kernel_size == 1, "kernel_size != 1 isn't supported." num_locations = height * width inputs = np.reshape(inputs, [batch_size, num_locations, input_channels]) output_grads = np.reshape(output_grads, [batch_size, num_locations, output_channels]) fisher_diag = np.zeros((input_channels, output_channels)) for i in range(batch_size): # Each example's approximation is a square(sum-of-outer-products). example_fisher_diag = np.zeros((input_channels, output_channels)) for j in range(num_locations): example_fisher_diag += np.outer(inputs[i, j], output_grads[i, j]) fisher_diag += np.square(example_fisher_diag) # Normalize by batch_size (not num_locations). 
return np.diag(fisher_diag.flatten()) / batch_size def testMultiply(self): result, _ = self.runFisherBlockOps(self.w, [self.inputs], [self.outputs], [self.output_grads]) # Construct Fisher-vector product. expected_result = self.fisherApprox().dot(self.w.flatten()) expected_result = expected_result.reshape([ self.kernel_size, self.kernel_size, self.input_channels, self.output_channels ]) self.assertAllClose(expected_result, result) def testMultiplyInverse(self): _, result = self.runFisherBlockOps(self.w, [self.inputs], [self.outputs], [self.output_grads]) # Construct inverse Fisher-vector product. expected_result = np.linalg.inv(self.fisherApprox()).dot(self.w.flatten()) expected_result = expected_result.reshape([ self.kernel_size, self.kernel_size, self.input_channels, self.output_channels ]) self.assertAllClose(expected_result, result, atol=1e-3) def testRegisterAdditionalMinibatch(self): """Ensure 1 big minibatch and 2 small minibatches are equivalent.""" multiply_result_big, multiply_inverse_result_big = self.runFisherBlockOps( self.w, [self.inputs], [self.outputs], [self.output_grads]) multiply_result_small, multiply_inverse_result_small = ( self.runFisherBlockOps(self.w, np.split(self.inputs, 2), np.split(self.outputs, 2), np.split(self.output_grads, 2))) self.assertAllClose(multiply_result_big, multiply_result_small) self.assertAllClose(multiply_inverse_result_big, multiply_inverse_result_small) def testMultiplyHasBias(self): result, _ = self.runFisherBlockOps((self.w, self.b), [self.inputs], [self.outputs], [self.output_grads]) # Clone 'b' along 'input_channels' dimension. b_filter = np.tile( np.reshape(self.b, [1, 1, 1, self.output_channels]), [self.kernel_size, self.kernel_size, 1, 1]) params = np.concatenate([self.w, b_filter], axis=2) expected_result = self.fisherApprox(True).dot(params.flatten()) # Extract 'b' from concatenated parameters. expected_result = expected_result.reshape([ self.kernel_size, self.kernel_size, self.input_channels + 1, self.output_channels ]) expected_result = (expected_result[:, :, 0:-1, :], np.reshape(expected_result[:, :, -1, :], [self.output_channels])) self.assertEqual(len(result), 2) self.assertAllClose(expected_result[0], result[0]) self.assertAllClose(expected_result[1], result[1]) def runFisherBlockOps(self, params, inputs, outputs, output_grads): """Run Ops guaranteed by FisherBlock interface. Args: params: Tensor or 2-tuple of Tensors. Represents weights or weights and bias of this layer. inputs: list of Tensors of shape [batch_size, input_size]. Inputs to layer. outputs: list of Tensors of shape [batch_size, output_size]. Preactivations produced by layer. output_grads: list of Tensors of shape [batch_size, output_size]. Gradient of loss with respect to 'outputs'. 
Returns: multiply_result: Result of FisherBlock.multiply(params) multiply_inverse_result: Result of FisherBlock.multiply_inverse(params) """ with ops.Graph().as_default(), self.test_session() as sess: inputs = as_tensors(inputs) outputs = as_tensors(outputs) output_grads = as_tensors(output_grads) params = as_tensors(params) block = fb.ConvDiagonalFB( lc.LayerCollection(), params, strides=[1, 1, 1, 1], padding='SAME') for (i, o) in zip(inputs, outputs): block.register_additional_minibatch(i, o) block.instantiate_factors((output_grads,), damping=0.0) sess.run(tf_variables.global_variables_initializer()) sess.run(block._factor.make_covariance_update_op(0.0)) multiply_result = sess.run(block.multiply(params)) multiply_inverse_result = sess.run(block.multiply_inverse(params)) return multiply_result, multiply_inverse_result class ConvKFCBasicFBTest(test.TestCase): def _testConvKFCBasicFBInitParams(self, params): with ops.Graph().as_default(): random_seed.set_random_seed(200) if isinstance(params, (list, tuple)): params = [array_ops.constant(param) for param in params] else: params = array_ops.constant(params) inputs = random_ops.random_normal((2, 2, 2)) outputs = random_ops.random_normal((2, 2, 2)) block = fb.ConvKFCBasicFB(lc.LayerCollection(), params, [1, 1, 1], 'SAME') block.register_additional_minibatch(inputs, outputs) self.assertAllEqual([outputs], block.tensors_to_compute_grads()) def testConvKFCBasicFBInitParamsParamsTuple(self): self._testConvKFCBasicFBInitParams([np.array([1., 2.]), np.array(3.)]) def testConvKFCBasicFBInitParamsParamsSingle(self): self._testConvKFCBasicFBInitParams([np.array([1., 2.])]) def testMultiplyInverseTuple(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) params = random_ops.random_normal((2, 2, 2, 2)) inputs = random_ops.random_normal((2, 2, 2, 2)) outputs = random_ops.random_normal((2, 2, 2, 2)) block = fb.ConvKFCBasicFB(lc.LayerCollection(), params, (1, 1, 1, 1), 'SAME') block.register_additional_minibatch(inputs, outputs) grads = outputs**2 block.instantiate_factors(([grads],), 0.5) # Make sure our inverse is something other than the identity. sess.run(tf_variables.global_variables_initializer()) sess.run(block._input_factor.make_inverse_update_ops()) sess.run(block._output_factor.make_inverse_update_ops()) vector = (np.arange(1, 15).reshape(7, 2).astype(np.float32), np.arange(2, 4).reshape(2, 1).astype(np.float32)) output = block.multiply_inverse((array_ops.constant(vector[0]), array_ops.constant(vector[1]))) output = sess.run(output) self.assertAllClose([0.136455, 0.27291], output[0][0]) self.assertAllClose([0.27291, 0.409365], output[1]) def testMultiplyInverseNotTuple(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) params = random_ops.random_normal((2, 2, 2, 2)) inputs = random_ops.random_normal((2, 2, 2, 2)) outputs = random_ops.random_normal((2, 2, 2, 2)) block = fb.ConvKFCBasicFB(lc.LayerCollection(), params, (1, 1, 1, 1), 'SAME') block.register_additional_minibatch(inputs, outputs) self.assertFalse(block._has_bias) grads = outputs**2 block.instantiate_factors(([grads],), 0.5) # Make sure our inverse is something other than the identity. 
sess.run(tf_variables.global_variables_initializer()) sess.run(block._input_factor.make_inverse_update_ops()) sess.run(block._output_factor.make_inverse_update_ops()) vector = np.arange(1, 17).reshape(8, 2).astype(np.float32) output = block.multiply_inverse(array_ops.constant(vector)) self.assertAllClose([0.136455, 0.27291], sess.run(output)[0]) def testMultiplyInverseNotTupleWithBias(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) params = [random_ops.random_normal((2, 2, 2, 2))] inputs = random_ops.random_normal((2, 2, 2, 2)) outputs = random_ops.random_normal((2, 2, 2, 2)) block = fb.ConvKFCBasicFB(lc.LayerCollection(), params, (1, 1, 1, 1), 'SAME') block.register_additional_minibatch(inputs, outputs) self.assertTrue(block._has_bias) grads = outputs**2 block.instantiate_factors(([grads],), 0.5) # Make sure our inverse is something other than the identity. sess.run(tf_variables.global_variables_initializer()) sess.run(block._input_factor.make_inverse_update_ops()) sess.run(block._output_factor.make_inverse_update_ops()) vector = np.arange(1, 19).reshape(9, 2).astype(np.float32) output = block.multiply_inverse(array_ops.constant(vector)) self.assertAllClose([0.136455, 0.27291], sess.run(output)[0]) def testMultiplyInverseAgainstExplicit(self): with ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) params = array_ops.zeros((2, 2, 2, 2)) inputs = array_ops.zeros((2, 2, 2, 2)) outputs = array_ops.zeros((2, 2, 2, 2)) block = fb.ConvKFCBasicFB(lc.LayerCollection(), params, (1, 1, 1, 1), 'SAME') block.register_additional_minibatch(inputs, outputs) grads = outputs**2 damping = 0. # This test is only valid without damping. block.instantiate_factors(([grads],), damping) sess.run(state_ops.assign(block._input_factor._cov, _make_psd(8))) sess.run(state_ops.assign(block._output_factor._cov, _make_psd(2))) sess.run(block._input_factor.make_inverse_update_ops()) sess.run(block._output_factor.make_inverse_update_ops()) v_flat = np.arange(16, dtype=np.float32) vector = utils.column_to_tensors(params, array_ops.constant(v_flat)) output = block.multiply_inverse(vector) output_flat = sess.run(utils.tensors_to_column(output)).ravel() full = sess.run(block.full_fisher_block()) explicit = np.dot(np.linalg.inv(full + damping * np.eye(16)), v_flat) self.assertAllClose(output_flat, explicit) class FullyConnectedSeriesFBTest(test.TestCase): def testFullyConnectedSeriesFBInit(self): with ops.Graph().as_default(): random_seed.set_random_seed(200) inputs = array_ops.constant([1., 2.]) outputs = array_ops.constant([3., 4.]) block = fb.FullyConnectedSeriesFB( lc.LayerCollection(), inputs=[inputs], outputs=[outputs]) self.assertAllEqual([outputs], block.tensors_to_compute_grads()) def testInstantiateFactorsHasBias(self): with ops.Graph().as_default(): random_seed.set_random_seed(200) inputs = array_ops.constant([[1., 2.], [3., 4.]]) outputs = array_ops.constant([[3., 4.], [5., 6.]]) block = fb.FullyConnectedSeriesFB( lc.LayerCollection(), inputs=[inputs], outputs=[outputs], has_bias=True) grads = outputs**2 block.instantiate_factors(((grads,),), 0.5) def testInstantiateFactorsNoBias(self): with ops.Graph().as_default(): random_seed.set_random_seed(200) inputs = array_ops.constant([[1., 2.], [3., 4.]]) outputs = array_ops.constant([[3., 4.], [5., 6.]]) block = fb.FullyConnectedSeriesFB( lc.LayerCollection(), inputs=[inputs], outputs=[outputs], has_bias=False) grads = outputs**2 block.instantiate_factors(((grads,),), 0.5) def 
as_tensors(tensor_or_tuple):
  """Converts a potentially nested tuple of np.array to Tensors."""
  if isinstance(tensor_or_tuple, (tuple, list)):
    return tuple(as_tensors(t) for t in tensor_or_tuple)
  return ops.convert_to_tensor(tensor_or_tuple)


if __name__ == '__main__':
  test.main()
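# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test module above): the explicit-inverse
# comparison in testMultiplyInverseAgainstExplicit works because the KFAC block
# approximates the Fisher as a Kronecker product of two small covariance
# factors, and a Kronecker product can be inverted factor-by-factor.  The
# standalone NumPy check below demonstrates that identity; every name here is
# mine, the exact vec() ordering used by utils.tensors_to_column may differ,
# and damping is omitted (which is why the explicit test only runs with
# damping = 0).
import numpy as np

_rng = np.random.RandomState(0)


def _random_psd(n):
  """Random symmetric positive-definite matrix (stand-in for a covariance factor)."""
  m = _rng.randn(n, n)
  return m.dot(m.T) + n * np.eye(n)


_A = _random_psd(3)    # input-side factor, analogous to block._input_factor._cov
_G = _random_psd(2)    # output-side factor, analogous to block._output_factor._cov
_X = _rng.randn(2, 3)  # the "vector", reshaped to (output_dim, input_dim)

# Dense route: build the full 6x6 Fisher approximation and invert it.
_full = np.kron(_A, _G)
_lhs = np.linalg.inv(_full).dot(_X.flatten(order='F'))
# Factored route: inv(kron(A, G)) @ vec(X) == vec(inv(G) @ X @ inv(A)) for
# symmetric A, G and column-major vec -- two small inverses instead of one
# big one.
_rhs = np.linalg.inv(_G).dot(_X).dot(np.linalg.inv(_A)).flatten(order='F')
assert np.allclose(_lhs, _rhs)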
"""The tests for the MQTT lock platform.""" import pytest from homeassistant.components.lock import ( DOMAIN as LOCK_DOMAIN, SERVICE_LOCK, SERVICE_UNLOCK, STATE_LOCKED, STATE_UNLOCKED, ) from homeassistant.const import ATTR_ASSUMED_STATE, ATTR_ENTITY_ID from homeassistant.setup import async_setup_component from .test_common import ( help_test_availability_when_connection_lost, help_test_availability_without_topic, help_test_custom_availability_payload, help_test_default_availability_payload, help_test_discovery_broken, help_test_discovery_removal, help_test_discovery_update, help_test_discovery_update_attr, help_test_entity_debug_info_message, help_test_entity_device_info_remove, help_test_entity_device_info_update, help_test_entity_device_info_with_connection, help_test_entity_device_info_with_identifier, help_test_entity_id_update_discovery_update, help_test_entity_id_update_subscriptions, help_test_setting_attribute_via_mqtt_json_message, help_test_setting_attribute_with_template, help_test_unique_id, help_test_update_with_json_attrs_bad_JSON, help_test_update_with_json_attrs_not_dict, ) from tests.common import async_fire_mqtt_message DEFAULT_CONFIG = { LOCK_DOMAIN: {"platform": "mqtt", "name": "test", "command_topic": "test-topic"} } async def test_controlling_state_via_topic(hass, mqtt_mock): """Test the controlling state via topic.""" assert await async_setup_component( hass, LOCK_DOMAIN, { LOCK_DOMAIN: { "platform": "mqtt", "name": "test", "state_topic": "state-topic", "command_topic": "command-topic", "payload_lock": "LOCK", "payload_unlock": "UNLOCK", "state_locked": "LOCKED", "state_unlocked": "UNLOCKED", } }, ) await hass.async_block_till_done() state = hass.states.get("lock.test") assert state.state is STATE_UNLOCKED assert not state.attributes.get(ATTR_ASSUMED_STATE) async_fire_mqtt_message(hass, "state-topic", "LOCKED") state = hass.states.get("lock.test") assert state.state is STATE_LOCKED async_fire_mqtt_message(hass, "state-topic", "UNLOCKED") state = hass.states.get("lock.test") assert state.state is STATE_UNLOCKED async def test_controlling_non_default_state_via_topic(hass, mqtt_mock): """Test the controlling state via topic.""" assert await async_setup_component( hass, LOCK_DOMAIN, { LOCK_DOMAIN: { "platform": "mqtt", "name": "test", "state_topic": "state-topic", "command_topic": "command-topic", "payload_lock": "LOCK", "payload_unlock": "UNLOCK", "state_locked": "closed", "state_unlocked": "open", } }, ) await hass.async_block_till_done() state = hass.states.get("lock.test") assert state.state is STATE_UNLOCKED assert not state.attributes.get(ATTR_ASSUMED_STATE) async_fire_mqtt_message(hass, "state-topic", "closed") state = hass.states.get("lock.test") assert state.state is STATE_LOCKED async_fire_mqtt_message(hass, "state-topic", "open") state = hass.states.get("lock.test") assert state.state is STATE_UNLOCKED async def test_controlling_state_via_topic_and_json_message(hass, mqtt_mock): """Test the controlling state via topic and JSON message.""" assert await async_setup_component( hass, LOCK_DOMAIN, { LOCK_DOMAIN: { "platform": "mqtt", "name": "test", "state_topic": "state-topic", "command_topic": "command-topic", "payload_lock": "LOCK", "payload_unlock": "UNLOCK", "state_locked": "LOCKED", "state_unlocked": "UNLOCKED", "value_template": "{{ value_json.val }}", } }, ) await hass.async_block_till_done() state = hass.states.get("lock.test") assert state.state is STATE_UNLOCKED async_fire_mqtt_message(hass, "state-topic", '{"val":"LOCKED"}') state = 
hass.states.get("lock.test") assert state.state is STATE_LOCKED async_fire_mqtt_message(hass, "state-topic", '{"val":"UNLOCKED"}') state = hass.states.get("lock.test") assert state.state is STATE_UNLOCKED async def test_controlling_non_default_state_via_topic_and_json_message( hass, mqtt_mock ): """Test the controlling state via topic and JSON message.""" assert await async_setup_component( hass, LOCK_DOMAIN, { LOCK_DOMAIN: { "platform": "mqtt", "name": "test", "state_topic": "state-topic", "command_topic": "command-topic", "payload_lock": "LOCK", "payload_unlock": "UNLOCK", "state_locked": "closed", "state_unlocked": "open", "value_template": "{{ value_json.val }}", } }, ) await hass.async_block_till_done() state = hass.states.get("lock.test") assert state.state is STATE_UNLOCKED async_fire_mqtt_message(hass, "state-topic", '{"val":"closed"}') state = hass.states.get("lock.test") assert state.state is STATE_LOCKED async_fire_mqtt_message(hass, "state-topic", '{"val":"open"}') state = hass.states.get("lock.test") assert state.state is STATE_UNLOCKED async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock): """Test optimistic mode without state topic.""" assert await async_setup_component( hass, LOCK_DOMAIN, { LOCK_DOMAIN: { "platform": "mqtt", "name": "test", "command_topic": "command-topic", "payload_lock": "LOCK", "payload_unlock": "UNLOCK", "state_locked": "LOCKED", "state_unlocked": "UNLOCKED", } }, ) await hass.async_block_till_done() state = hass.states.get("lock.test") assert state.state is STATE_UNLOCKED assert state.attributes.get(ATTR_ASSUMED_STATE) await hass.services.async_call( LOCK_DOMAIN, SERVICE_LOCK, {ATTR_ENTITY_ID: "lock.test"}, blocking=True ) mqtt_mock.async_publish.assert_called_once_with("command-topic", "LOCK", 0, False) mqtt_mock.async_publish.reset_mock() state = hass.states.get("lock.test") assert state.state is STATE_LOCKED assert state.attributes.get(ATTR_ASSUMED_STATE) await hass.services.async_call( LOCK_DOMAIN, SERVICE_UNLOCK, {ATTR_ENTITY_ID: "lock.test"}, blocking=True ) mqtt_mock.async_publish.assert_called_once_with("command-topic", "UNLOCK", 0, False) mqtt_mock.async_publish.reset_mock() state = hass.states.get("lock.test") assert state.state is STATE_UNLOCKED assert state.attributes.get(ATTR_ASSUMED_STATE) async def test_sending_mqtt_commands_and_explicit_optimistic(hass, mqtt_mock): """Test optimistic mode without state topic.""" assert await async_setup_component( hass, LOCK_DOMAIN, { LOCK_DOMAIN: { "platform": "mqtt", "name": "test", "state_topic": "state-topic", "command_topic": "command-topic", "payload_lock": "LOCK", "payload_unlock": "UNLOCK", "state_locked": "LOCKED", "state_unlocked": "UNLOCKED", "optimistic": True, } }, ) await hass.async_block_till_done() state = hass.states.get("lock.test") assert state.state is STATE_UNLOCKED assert state.attributes.get(ATTR_ASSUMED_STATE) await hass.services.async_call( LOCK_DOMAIN, SERVICE_LOCK, {ATTR_ENTITY_ID: "lock.test"}, blocking=True ) mqtt_mock.async_publish.assert_called_once_with("command-topic", "LOCK", 0, False) mqtt_mock.async_publish.reset_mock() state = hass.states.get("lock.test") assert state.state is STATE_LOCKED assert state.attributes.get(ATTR_ASSUMED_STATE) await hass.services.async_call( LOCK_DOMAIN, SERVICE_UNLOCK, {ATTR_ENTITY_ID: "lock.test"}, blocking=True ) mqtt_mock.async_publish.assert_called_once_with("command-topic", "UNLOCK", 0, False) mqtt_mock.async_publish.reset_mock() state = hass.states.get("lock.test") assert state.state is STATE_UNLOCKED assert 
state.attributes.get(ATTR_ASSUMED_STATE) async def test_availability_when_connection_lost(hass, mqtt_mock): """Test availability after MQTT disconnection.""" await help_test_availability_when_connection_lost( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_availability_without_topic(hass, mqtt_mock): """Test availability without defined availability topic.""" await help_test_availability_without_topic( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_default_availability_payload(hass, mqtt_mock): """Test availability by default payload with defined topic.""" await help_test_default_availability_payload( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_custom_availability_payload(hass, mqtt_mock): """Test availability by custom payload with defined topic.""" await help_test_custom_availability_payload( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock): """Test the setting of attribute via MQTT with JSON payload.""" await help_test_setting_attribute_via_mqtt_json_message( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_setting_attribute_with_template(hass, mqtt_mock): """Test the setting of attribute via MQTT with JSON payload.""" await help_test_setting_attribute_with_template( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog): """Test attributes get extracted from a JSON result.""" await help_test_update_with_json_attrs_not_dict( hass, mqtt_mock, caplog, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_update_with_json_attrs_bad_json(hass, mqtt_mock, caplog): """Test attributes get extracted from a JSON result.""" await help_test_update_with_json_attrs_bad_JSON( hass, mqtt_mock, caplog, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_discovery_update_attr(hass, mqtt_mock, caplog): """Test update of discovered MQTTAttributes.""" await help_test_discovery_update_attr( hass, mqtt_mock, caplog, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_unique_id(hass, mqtt_mock): """Test unique id option only creates one lock per unique_id.""" config = { LOCK_DOMAIN: [ { "platform": "mqtt", "name": "Test 1", "state_topic": "test-topic", "command_topic": "test_topic", "unique_id": "TOTALLY_UNIQUE", }, { "platform": "mqtt", "name": "Test 2", "state_topic": "test-topic", "command_topic": "test_topic", "unique_id": "TOTALLY_UNIQUE", }, ] } await help_test_unique_id(hass, mqtt_mock, LOCK_DOMAIN, config) async def test_discovery_removal_lock(hass, mqtt_mock, caplog): """Test removal of discovered lock.""" data = '{ "name": "test",' ' "command_topic": "test_topic" }' await help_test_discovery_removal(hass, mqtt_mock, caplog, LOCK_DOMAIN, data) async def test_discovery_update_lock(hass, mqtt_mock, caplog): """Test update of discovered lock.""" data1 = ( '{ "name": "Beer",' ' "state_topic": "test_topic",' ' "command_topic": "command_topic",' ' "availability_topic": "availability_topic1" }' ) data2 = ( '{ "name": "Milk",' ' "state_topic": "test_topic2",' ' "command_topic": "command_topic",' ' "availability_topic": "availability_topic2" }' ) await help_test_discovery_update(hass, mqtt_mock, caplog, LOCK_DOMAIN, data1, data2) @pytest.mark.no_fail_on_log_exception async def test_discovery_broken(hass, mqtt_mock, caplog): """Test handling of bad discovery message.""" data1 = '{ "name": "Beer" }' data2 = '{ "name": "Milk",' ' "command_topic": "test_topic" }' await help_test_discovery_broken(hass, mqtt_mock, caplog, LOCK_DOMAIN, data1, 
data2) async def test_entity_device_info_with_connection(hass, mqtt_mock): """Test MQTT lock device registry integration.""" await help_test_entity_device_info_with_connection( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_entity_device_info_with_identifier(hass, mqtt_mock): """Test MQTT lock device registry integration.""" await help_test_entity_device_info_with_identifier( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_entity_device_info_update(hass, mqtt_mock): """Test device registry update.""" await help_test_entity_device_info_update( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_entity_device_info_remove(hass, mqtt_mock): """Test device registry remove.""" await help_test_entity_device_info_remove( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_entity_id_update_subscriptions(hass, mqtt_mock): """Test MQTT subscriptions are managed when entity_id is updated.""" await help_test_entity_id_update_subscriptions( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_entity_id_update_discovery_update(hass, mqtt_mock): """Test MQTT discovery update when entity_id is updated.""" await help_test_entity_id_update_discovery_update( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG ) async def test_entity_debug_info_message(hass, mqtt_mock): """Test MQTT debug info.""" await help_test_entity_debug_info_message( hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG )
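# ---------------------------------------------------------------------------
# Hypothetical refactor sketch (not part of this test module): the config dicts
# above repeat the same lock options with small variations.  A tiny builder like
# the one below -- names are illustrative only -- would let a new case state just
# its overrides; the rewritten JSON-template test shows the intended usage and
# relies only on helpers already imported in this file.
def build_lock_config(**overrides):
    """Return a LOCK_DOMAIN config matching the defaults used in the tests above."""
    config = {
        "platform": "mqtt",
        "name": "test",
        "state_topic": "state-topic",
        "command_topic": "command-topic",
        "payload_lock": "LOCK",
        "payload_unlock": "UNLOCK",
        "state_locked": "LOCKED",
        "state_unlocked": "UNLOCKED",
    }
    config.update(overrides)
    return {LOCK_DOMAIN: config}


async def test_controlling_state_via_topic_with_template_helper(hass, mqtt_mock):
    """Same JSON-template scenario as above, expressed with the helper."""
    assert await async_setup_component(
        hass, LOCK_DOMAIN, build_lock_config(value_template="{{ value_json.val }}")
    )
    await hass.async_block_till_done()

    async_fire_mqtt_message(hass, "state-topic", '{"val":"LOCKED"}')
    assert hass.states.get("lock.test").state is STATE_LOCKED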
""" Functions for the MidBrain Segmentation Tool. """ # Author: Benjamin Garzon <benjamin.garzon@gmail.com> # License: BSD 3 clause from __future__ import division from subprocess import call from nipy import load_image, save_image from nipy.core.api import Image, ImageList import os import numpy as np import pickle from collections import defaultdict import warnings import nibabel as nib from nilearn.image import smooth_img import sys TOL = 1e-4 MAX_ITERS_EM = 200 MIN_ITERS_EM = 10 MAX_ITERS_ICM = 50 MAX_VOXELS_ICM = 1 TWOPI = 2*np.pi EPS = 1e-10 SUBJECT_PREFIX = 'MBST_' def write_image(x, coordmap, fname): """ Write out an image. Parameters ---------- x : numpy ndarray Array containing image intensities. coordmap : coordmap fname : string File name. """ auxImg = Image(x.astype(np.float32), coordmap) warnings.simplefilter('ignore', FutureWarning) newimg = save_image(auxImg, fname) warnings.simplefilter('default', FutureWarning) def get_neighbours(x): """ Get the neighbours of a point given the coordinates (6 points). Parameters ---------- x : tuple 3d coordinates of the center point. fname : string File name. Returns ---------- neighbours: list List of 3d coordinates of the neighbours. """ r = [-1, 0, 1] neighbours = [(x[0]+i,x[1]+j,x[2]+k) for i in r for j in r for k in r if (i,j,k)!=(0,0,0) and abs(i)+abs(j)+abs(k) < 2] return(neighbours) def get_neighbours_2(x): """ Get the neighbours of a point given the coordinates (27 points). Parameters ---------- x : tuple 3d coordinates of the center point. fname : string File name. Returns ---------- neighbours: list List of 3d coordinates of the neighbours. """ r = [-1, 0, 1] neighbours = [(x[0]+i,x[1]+j,x[2]+k) for i in r for j in r for k in r if abs(i)+abs(j)+abs(k) < 2] # neighbours = [x] return(neighbours) def create_atlas(atlas_file, subjects_dir, sub_dir, subjects, structural_file, structural_brain_file, structural_mask_file, parametric_file, mask_file, label_file): """ Create text file with a list of all files necessary to specify an atlas. Parameters ---------- atlas_file : string File name for the text file with the atlas. subjects_dir : string Directory with the subjects. sub_dir : string Name of the subdirectory with the images within the subject directory. subjects : list List of subject names. structural_file : string File name for the structural images. structural_brain_file : string File name for brain extracted structural image. structural_mask_file : string File name for the structural brain masks. parametric_file : string File name for the parametric images. mask_file : string File name for the masks. label_file : string File name for the label images. 
""" print(("Creating atlas with subjects: %s")%subjects) f = open(atlas_file, 'w') for code, subject in enumerate(subjects): structural = subjects_dir + subject + sub_dir + structural_file structural_brain = subjects_dir + subject + sub_dir + \ structural_brain_file structural_mask = subjects_dir + subject + sub_dir + \ structural_mask_file parametric = subjects_dir + subject + sub_dir + parametric_file mask = subjects_dir + subject + sub_dir + mask_file label = subjects_dir + subject + sub_dir + label_file subject_code = "%s%03d"%(SUBJECT_PREFIX, code) line = ('%s:%s:%s:%s:%s:%s:%s\n')%(subject_code, structural, structural_brain, structural_mask, parametric, mask, label) f.write(line) f.close() def register(target_structural_file, target_structural_brain_file, target_parametric_file, atlas_file, output_dir, clean_up, overwrite): """ Register the midbrain of all the instances of a given atlas to a target subject. It uses FSL's registration tools http://fsl.fmrib.ox.ac.uk/fsl For each instance (subject) in the atlas, the registration is done in 3 stages: 1 - Register non-linearly the structural images to localize the midbrain in the target image and apply the warps to the parametric image. 2 - Crop the parametric image (QSM, ...) around the midbrain. 3 - Register non-linearly the cropped parametric images to get an optimal registration within the midbrain 4 - Apply the registrations to the given label images. The structural and parametric file are assumed to be already aligned. Parameters ---------- target_structural_file : string File name for the target structural image. target_structural_brain_file : string File name for the target brain extracted structural image. target_parametric_file : string File name for the target parametric image. atlas_file : string Text file with the list of files of the atlas (see generate_atlas). output_dir : str Directory to save images generated in the registration. cleanup : boolean True - clean up unessential files after finishing to save space. overwrite : boolean True - overwrite target_label_file if it already exists. 
""" FSL_DIR_BIN = os.environ['FSL_DIR'] + '/bin/' # Create output dir if not os.path.exists(output_dir): os.makedirs(output_dir) # Read atlas f = open(atlas_file, 'r') for line in f: subject, structural_file, structural_brain_file, \ structural_mask_file, parametric_file, mask_file, \ label_file = line.replace('\n', '').split(':') # File names for the files that will be generated xfm_file = '%s/%s-xfm.mat'%(output_dir, subject) warp_file = '%s/%s-warp.nii.gz'%(output_dir, subject) fine_warp_file = '%s/%s-fine_warp.nii.gz'%(output_dir, subject) warped_parametric_file = '%s/%s-warped_param.nii.gz'%(output_dir, \ subject) cropped_parametric_file = '%s/%s-cr_param.nii.gz'%(output_dir, subject) cropped_target_parametric_file = \ '%s/%s-cr_target_param.nii.gz'%(output_dir, subject) cropped_mask_file = '%s/%s-cr_mask.nii.gz'%(output_dir, subject) cropped_label_file = '%s/%s-cr_label.nii.gz'%(output_dir, subject) cropped_warped_label_file = '%s/%s-cr_warped_label.nii.gz'%(output_dir, subject) cropped_warped_parametric_file = \ '%s/%s-cr_warped_param.nii.gz'%(output_dir, subject) invwarp_file = '%s/%s-invwarp.nii.gz'%(output_dir, subject) warped_mask_file = '%s/%s-warped_mask.nii.gz'%(output_dir, subject) # File with warped labels from this atlas subject warped_label_file = '%s/%s-warped_label.nii.gz'%(output_dir, subject) if (os.path.exists(warped_label_file) and overwrite) or \ not os.path.exists(warped_label_file): print(('Registering subject %s.')%(subject)) # Get mask dimensions mask_image = load_image(mask_file) mask = mask_image.get_data() maxX = np.max(np.max(mask, axis=1), axis=1) firstX = np.where(maxX>0)[0][0] lastX = np.where(maxX>0)[0][-1] maxY = np.max(np.max(mask, axis=0), axis=1) firstY= np.where(maxY>0)[0][0] lastY = np.where(maxY>0)[0][-1] maxZ = np.max(np.max(mask, axis=0), axis=0) firstZ = np.where(maxZ>0)[0][0] lastZ = np.where(maxZ>0)[0][-1] # Registration: target structural to structural command=[FSL_DIR_BIN+'flirt', '-in', target_structural_brain_file, '-ref', structural_brain_file, '-omat', xfm_file] call(command) command=[FSL_DIR_BIN+'fnirt','--in=%s'%target_structural_file, '--ref=%s'%structural_file, '--refmask=%s'%structural_mask_file, '--aff=%s'%xfm_file, '--cout=%s'%warp_file] call(command) command=[FSL_DIR_BIN+'applywarp','--in=%s'%target_parametric_file, '--ref=%s'%parametric_file,'--warp=%s'%warp_file, '--out=%s'%warped_parametric_file] call(command) # Invert warp command=[FSL_DIR_BIN+'invwarp','--ref=%s'%target_structural_file, '--warp=%s'%warp_file, '--out=%s'%invwarp_file] call(command) print('Finished registering the structural image.') # Load parametric files parametric = load_image(parametric_file).get_data() target_parametric = load_image(warped_parametric_file).get_data() # Crop the arrays around mask cropped_parametric = parametric[firstX:lastX + 1, firstY:lastY + 1, firstZ:lastZ + 1] cropped_target_parametric = target_parametric[firstX:lastX + 1, firstY:lastY + 1, firstZ:lastZ + 1] cropped_mask = mask[firstX:lastX + 1, firstY:lastY + 1, firstZ:lastZ + 1] write_image(cropped_parametric, mask_image.coordmap, cropped_parametric_file) write_image(cropped_target_parametric, mask_image.coordmap, cropped_target_parametric_file) write_image(cropped_mask, mask_image.coordmap, cropped_mask_file) # Do finer registration command=[FSL_DIR_BIN+'fnirt','--in=%s'%cropped_parametric_file, '--inmask=%s'%cropped_mask_file, '--ref=%s'%cropped_target_parametric_file, '--refmask=%s'%cropped_mask_file, '--cout=%s'%fine_warp_file, '--intmod=global_linear', '--jacrange=.98,1.02'] 
call(command) # Crop labels and apply registration label_image = load_image(label_file) labels = label_image.get_data() cropped_labels = labels[firstX:lastX + 1, firstY:lastY + 1, firstZ:lastZ + 1] write_image(cropped_labels, label_image.coordmap, \ cropped_label_file) command=[FSL_DIR_BIN+'applywarp','--in=%s'%cropped_label_file, '--ref=%s'%cropped_mask_file, '--warp=%s'%fine_warp_file, '--out=%s'%cropped_warped_label_file, '--interp=nn'] call(command) # Apply to parametric map for the voting later command=[FSL_DIR_BIN+'applywarp','--in=%s'%cropped_parametric_file, '--ref=%s'%cropped_mask_file, '--warp=%s'%fine_warp_file, '--out=%s'%cropped_warped_parametric_file] call(command) # Put labels back into original space cropped_warped_labels = \ load_image(cropped_warped_label_file).get_data() labels = np.zeros(labels.shape) labels[firstX:lastX + 1, firstY:lastY + 1, firstZ:lastZ + 1] = \ cropped_warped_labels # Save labels write_image(labels, label_image.coordmap, warped_label_file) # Apply inverse warp command=[FSL_DIR_BIN+'applywarp','--in=%s'%warped_label_file, '--ref=%s'%target_structural_file, '--warp=%s'%invwarp_file, '--out=%s'%warped_label_file,'--interp=nn'] call(command) # Apply warp to mask command=[FSL_DIR_BIN+'applywarp','--in=%s'%mask_file, '--ref=%s'%target_structural_file, '--warp=%s'%invwarp_file, '--out=%s'%warped_mask_file,'--interp=nn'] call(command) # Done with all the warps print('Finished applying the warps.') else: print('Registered labels already exist for subject %s.')%(subject) # Clean up if clean_up: for file_name in [ xfm_file, warp_file, fine_warp_file, warped_parametric_file, cropped_parametric_file, cropped_label_file, cropped_warped_label_file, invwarp_file ]: try: os.remove(file_name) except OSError: pass def fuse_labels(subject_list, output_dir, fused_file, average_mask_file, votes_file): """ Fuses the labels of all the registered instances in the atlas. For each of the instances, correlation between the warped and target intensities is computed to give larger weight to instances with better registration. A mask is also created fusing those of all the instances in the atlas. Parameters ---------- subject_list : list List of atlas instances. output_dir : string Directory containing stored images and to output results. fused_file : string File name for calculated fused labels (to be used as priors). average_mask_file : string File name for the average mask, obtained fusing all the masks. votes_file : string Text file where the votes will be saved. 
""" n_subjects = 0 total_votes = 0 # votes_f = open(output_dir + '/' + 'votes.txt', 'w') votes_f = open(votes_file, 'w') print('Fusing all registered labels to obtain priors.') for subject in subject_list: # File names for the files that will be used warped_label_file = '%s/%s-warped_label.nii.gz'%(output_dir, subject) cropped_mask_file = '%s/%s-cr_mask.nii.gz'%(output_dir, subject) cropped_warped_parametric_file = \ '%s/%s-cr_warped_param.nii.gz'%(output_dir, subject) cropped_target_parametric_file = \ '%s/%s-cr_target_param.nii.gz'%(output_dir, subject) warped_mask_file = '%s/%s-warped_mask.nii.gz'%(output_dir, subject) # Load images label_image = load_image(warped_label_file) warped_labels = label_image.get_data() warped_mask_image = load_image(warped_mask_file) warped_mask = warped_mask_image.get_data() cropped_mask = load_image(cropped_mask_file).get_data() cropped_warped_parametric = \ load_image(cropped_warped_parametric_file).get_data() cropped_target_parametric = \ load_image(cropped_target_parametric_file).get_data() # Compute correlation between warped and target intensities corr = np.corrcoef(cropped_warped_parametric[cropped_mask>0], cropped_target_parametric[cropped_mask>0])[0,1] # Compute votes and save if corr < 0: corr = 0 vote = corr**2 votes_f.write('%s: %f\n'%(subject, vote)) # Create / update fused labels and mask if n_subjects == 0: fused_labels = warped_labels*vote fused_mask = warped_mask*vote else: fused_labels += warped_labels*vote fused_mask += warped_mask*vote total_votes += vote n_subjects += 1 # Compute votes fused_labels = (fused_labels/total_votes) fused_mask = (fused_mask/total_votes) > .5 votes_f.close() # Write out resulting images write_image(fused_labels, label_image.coordmap, fused_file) write_image(fused_mask, warped_mask_image.coordmap, average_mask_file) def smooth_map(input_file, output_file, fwhm): """ Returns value of a Gaussian function. Parameters ---------- input_file : string Input file name. output_file : string Output file name. fwhm : float FWHM """ output_image = smooth_img(input_file, fwhm=fwhm) nib.save(output_image, output_file) def norm(x, sigma2): """ Returns value of a Gaussian function. Parameters ---------- x : numpy ndarray x values. sigma2 : float Variance. Returns ---------- y : numpy ndarray Value of the function evaluated at x. """ y = np.exp(-0.5*x**2/sigma2) / np.sqrt(TWOPI*sigma2) + EPS return(y) def expectation_maximization(parametric_matrix, priors_matrix): """ Estimate posteriors by expectation maximization. Parameters ---------- parametric_matrix : numpy ndarray Matrix with intensities repeated for each class (n_voxels x n_classes). priors_matrix : numpy ndarray Matrix with priors for each class (n_voxels x n_classes). Returns ---------- pis : numpy ndarray Matrix with posteriors for each class (n_voxels x n_classes). 
""" n_classes = parametric_matrix.shape[1] n_voxels = parametric_matrix.shape[0] iteration = 1 ratio = 1 E = -1 priors_matrix = np.hstack((priors_matrix, priors_matrix, priors_matrix))/3 parametric_matrix = np.hstack((parametric_matrix, \ parametric_matrix, parametric_matrix)) pis = priors_matrix # Iterate ensuring a min of iterations to avoid local min while ((ratio > TOL) or (iteration < MIN_ITERS_EM)) \ and (iteration < MAX_ITERS_EM): # Update parameter estimates means = np.sum(pis * parametric_matrix, axis=0, keepdims=True)/ \ np.sum(pis, axis=0, keepdims=True) means_matrix = np.repeat(means, n_voxels, axis=0) sigma2 = np.sum(pis * \ (parametric_matrix - means_matrix)**2, axis=0, keepdims=True)/ \ np.sum(pis, axis=0, keepdims=True) sigma2_matrix = np.repeat(sigma2, n_voxels, 0) if iteration == 1: # Separate background classes means_matrix[:, :n_classes] = \ means_matrix[:, :n_classes] \ - 2*np.sqrt(sigma2_matrix[:, :n_classes]) means_matrix[:, n_classes:2*n_classes] = \ means_matrix[:, n_classes:2*n_classes] \ + 2*np.sqrt(sigma2_matrix[:, n_classes:2*n_classes]) sigma2_matrix = sigma2_matrix/9 f = norm((parametric_matrix - means_matrix), sigma2_matrix) \ * priors_matrix pis = f / np.repeat(np.sum(f, axis=1, keepdims=True), n_classes*3, 1) # Update likelihood E_old = E E = - np.sum(np.log(np.sum(f, axis=1))) ratio = (E - E_old)/E_old iteration += 1 print('Expectation-maximization: Iterations: %d, Cost: %f.'%(iteration-1, E)) pis = pis[:, :n_classes] + pis[:, n_classes:2*n_classes] + \ pis[:, 2*n_classes:] return(pis) def estimate_spatial_relations(subject_list, output_dir, spatial_rel_file, priors_file): """ Estimates the probabilities of classes of adjacent voxels from the label images warped to the target subject. Parameters ---------- subject_list : list List of atlas instances. output_dir : string Directory containing stored images and to output results. spatial_rel_file : string File name for the pickle file to store the spatial relations as a dict. The dict is indexed as: (center coords, neighbour coords, center class, neighbour class):probability priors_file : string File name for the priors. 
""" # Load priors priors_image = load_image(priors_file) priors = priors_image.get_data() mask = np.sum(priors, 3) > 0 n_voxels = np.sum(mask > 0) print ("Number of voxels to estimate spatial relations for: %d.\n")%n_voxels spatial_rel = defaultdict(float) n_subjects = len(subject_list) coords = np.where(mask) points = set(zip(coords[0], coords[1], coords[2])) for subject in subject_list: print ("Estimating spatial relations for subject %s.\n")%subject # Load data warped_label_file = '%s/%s-warped_label.nii.gz'%(output_dir, subject) label_image = load_image(warped_label_file) warped_labels = label_image.get_data().astype(np.int8) shape = warped_labels.shape n_classes = shape[3] segmentation = np.zeros(shape[:3], dtype=np.int8) # Create segmentation image for i in range(n_classes): segmentation = segmentation + (i + 1) * warped_labels[:, :, :, i] # Estimate spatial relations for point in points: neighbours = get_neighbours(point) for neighbour in neighbours: index = (point, neighbour, segmentation[point[0], point[1], point[2]], segmentation[neighbour[0], neighbour[1], neighbour[2]]) spatial_rel[index] += 1./n_subjects # Smooth spatial relations spatial_rel_smoothed = defaultdict(float) spatial_rel_normalization = defaultdict(float) print "Smoothing spatial relations.\n" for point in points: neighbours = get_neighbours(point) for neighbour in neighbours: d = (neighbour[0] - point[0], neighbour[1] - point[1], \ neighbour[2] - point[2]) for label_point in range(n_classes+1): for label_neighbour in range(n_classes+1): p = np.mean([ spatial_rel[(x, (x[0] + d[0], x[1] + d[1], \ x[2] + d[2]), label_point, label_neighbour)] \ for x in get_neighbours_2(point)]) spatial_rel_smoothed[(point, neighbour, label_point, \ label_neighbour)] = p spatial_rel_normalization[(point, neighbour, label_point)] \ += p # Normalize spatial relations print "Normalizing spatial relations.\n" for point in points: neighbours = get_neighbours(point) for neighbour in neighbours: for label_point in range(n_classes+1): for label_neighbour in range(n_classes+1): p = spatial_rel_normalization[(point, neighbour, \ label_point)] if p > 0: spatial_rel_smoothed[ (point, neighbour, label_point, \ label_neighbour) ] /= p else: spatial_rel_smoothed[ (point, neighbour, label_point, \ label_neighbour) ] = -1 with open(spatial_rel_file, 'wb') as fp: pickle.dump(spatial_rel_smoothed, fp) def compute_regularization(points, spatial_rel, segmentation, shape): """ Computes regularization using the spatial relations. Parameters ---------- points : list List with the coordinates of voxels. spatial_rel_file : string File name for the pickle file to store the spatial relations as a dict. The dict is indexed as: (center coords, neighbour coords, center class, neighbour class):probability segmentation : ndarray Segmentation image. shape : tuple Shape of the priors image (=shape of the segmentation x n_classes). Returns ---------- scores : dict Dictionary with the scores of the subjects. 
""" n_labels = shape[3] # Add background class (index 0) shape = (shape[0], shape[1], shape[2], shape[3] + 1) # Compute spatial regularization based on spatial relations regularization = np.ones(shape) reg_exponent = np.zeros(shape) print "Computing regularization using spatial relations.\n" for lab_ind in range(n_labels + 1): for point in points: neighbours = get_neighbours(point) for neighbour in neighbours: index = (neighbour, point, segmentation[neighbour[0], neighbour[1], neighbour[2]], lab_ind) if spatial_rel[index] >= 0: regularization[point[0], point[1], point[2], lab_ind] *= spatial_rel[index] reg_exponent[point[0], point[1], point[2], lab_ind] += 1 # else: # print "Spatial relation not found: %s"%(index,) regularization[reg_exponent>0] = np.power(regularization[reg_exponent>0], 1./reg_exponent[reg_exponent>0]) regularization = regularization[:, :, :, 1:]/ \ np.repeat(np.sum(regularization, axis=3, keepdims=True) + EPS, n_labels, axis=3) return(regularization) def do_segmentation(parametric_file, priors_file, mask_file, segmentation_file, segmentation4D_file, spatial_rel_file): """ Do the segmentation based on the priors file and expectation maximization for the probabilities and using regularization. Parameters ---------- parametric_file : string File name for the parametric image. priors_file : string File name for the priors. mask_file : string File name for mask image. segmentation_file : string File name for segmentation, 1 volume, each class with a different value. If equal to '' no regularization is used. segmentation4D_file : string File name for segmentation, 1 volume per class. spatial_rel_file : string File name for the pickle file to store the spatial relations as a dict. """ # Load data priors_image = load_image(priors_file) priors = priors_image.get_data() orig_priors = priors mask_image = load_image(mask_file) mask = mask_image.get_data()>0 parametric = load_image(parametric_file).get_data()[mask] n_voxels = np.sum(mask>0) n_labels = priors.shape[3] mask4D = np.reshape(mask, mask.shape + (1, ) ) mask4D = np.repeat(mask4D, n_labels, axis=3) coords = np.where(mask) points = set(zip(coords[0], coords[1], coords[2])) n_classes = n_labels + 1 parametric_matrix = np.repeat(parametric[:, np.newaxis], n_classes, axis=1) if spatial_rel_file == '': # Not using regularization print ("Segmenting image.") priors_matrix = np.reshape(priors[mask4D], (n_voxels, n_labels)) # Add background classes background = (1 - np.sum(priors_matrix, axis=1, keepdims=True)) priors_matrix = np.hstack((priors_matrix, background)) # Expectation-maximization pis = expectation_maximization(parametric_matrix, priors_matrix) # Get segmentation posteriors = np.zeros(priors.shape) posteriors[mask4D] = pis[:, :-1].ravel() max_pis = np.max(pis, axis=1, keepdims=True) labels = 1*(pis == np.repeat(max_pis, n_classes, axis=1)) segmentation4D = np.zeros(priors.shape, dtype=np.int8) segmentation4D[mask4D] = labels[:, :-1].ravel().astype(int) segmentation = np.zeros(mask.shape, dtype=np.int8) for i in range(n_labels): segmentation = segmentation + (i+1)*segmentation4D[:, :, :, i] else: # Using regularization print ("Segmenting image with regularization.") with open(spatial_rel_file, 'rb') as fp: spatial_rel = pickle.load(fp) # Iterate between expectation maximization and spatial regularization segmentation_old = np.zeros(mask.shape, dtype=np.int8) seg_diff = MAX_VOXELS_ICM iteration = 0 while (seg_diff >= MAX_VOXELS_ICM and iteration < MAX_ITERS_ICM): print("Iteration %d"%(iteration)) priors_matrix = 
np.reshape(priors[mask4D], (n_voxels, n_labels)) # Add background class background = (1 - np.sum(priors_matrix, axis=1, keepdims=True)) priors_matrix = np.hstack((priors_matrix, background)) # Expectation-maximization pis = expectation_maximization(parametric_matrix, priors_matrix) # Get posteriors and segmentation posteriors = np.zeros(priors.shape) posteriors[mask4D] = pis[:, :-1].ravel() max_pis = np.max(pis, axis=1, keepdims=True) labels = 1*(pis == np.repeat(max_pis, n_classes, axis=1)) segmentation4D = np.zeros(priors.shape, dtype=np.int8) segmentation4D[mask4D] = labels[:, :-1].ravel().astype(int) segmentation = np.zeros(mask.shape, dtype=np.int8) for i in range(n_labels): segmentation = segmentation + (i+1)*segmentation4D[:, :, :, i] # Compute regularization regularization = compute_regularization(points, spatial_rel, segmentation, priors.shape) regularization = np.concatenate((1 - np.sum(regularization, axis=3, keepdims=True), regularization), axis=3) posteriors = np.concatenate((1 - np.sum(posteriors, axis=3, keepdims=True), posteriors), axis=3) priors = posteriors * regularization priors = priors[:, :, :, 1:]/ \ np.repeat(np.sum(priors, axis=3, keepdims=True) + EPS, n_labels, axis=3) seg_diff = np.sum(np.abs(segmentation_old - segmentation)>0) print("%d voxels changed."%(seg_diff)) segmentation_old = segmentation iteration += 1 # Remove spatial relations try: os.remove(spatial_rel_file) except OSError: pass # Write out the results write_image(segmentation, mask_image.coordmap, segmentation_file) write_image(segmentation4D, priors_image.coordmap, segmentation4D_file) def compute_scores(segmentation_file, target_label_file, score_names): """ Computes summary scores to evaluate the segmentations. Parameters ---------- segmentation_file : string File name for the automatically segmented label image. target_label_file : string File name for the manually segmented label image. score_names : list List of scores to return. Returns ---------- scores : dict Dictionary with the scores of the subjects. """ target_labels_image = load_image(target_label_file) voxel_size = np.array(target_labels_image.header.get_zooms()) voxel_vol = voxel_size[0]*voxel_size[1]*voxel_size[2] target_labels = target_labels_image.get_data()>0 segmentation = load_image(segmentation_file).get_data()>0 scores = { 'dice': list(), 'difference': list(), 'manual_vol': list(), 'automated_vol': list() } # Compute the scores for all the subjects in the list for i in range(segmentation.shape[3]): target_labels_vol = np.sum(target_labels[:,:,:,i]) segmentation_vol = np.sum(segmentation[:,:,:,i]) if 'dice' in score_names: dice = 2*np.sum(target_labels[:,:,:,i]*segmentation[:,:,:,i])/\ (target_labels_vol + segmentation_vol) scores['dice'].append(str(dice)) if 'difference' in score_names: d1 = np.sum((1*target_labels[:,:,:,i]-1*segmentation[:,:,:,i])>0) d2 = np.sum((-1*target_labels[:,:,:,i]+1*segmentation[:,:,:,i])>0) difference = (d1 + d2)/(target_labels_vol + segmentation_vol) scores['difference'].append(str(difference)) if 'manual_vol' in score_names: manual_vol = np.sum(target_labels[:,:,:,i])*voxel_vol scores['manual_vol'].append(str(manual_vol)) if 'automated_vol' in score_names: automated_vol = np.sum(segmentation[:,:,:,i])*voxel_vol scores['automated_vol'].append(str(automated_vol)) return(scores)
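# ---------------------------------------------------------------------------
# Hedged end-to-end sketch of how the functions above are meant to chain
# together (atlas listing -> registration -> label fusion -> optional spatial
# relations -> segmentation -> scoring), based only on the signatures and
# docstrings in this module.  The import name, every file path and the subject
# list are hypothetical; smooth_map() could additionally be applied to the
# fused priors but is omitted here.
import midbrain_segmentation as mbst   # assumed module name

atlas_file = 'atlas.txt'
output_dir = 'reg_output'
subjects = ['subj01', 'subj02', 'subj03']

# 1. List the atlas instances (one colon-separated line of files per subject).
mbst.create_atlas(atlas_file, '/data/atlas/', '/anat/', subjects,
                  't1.nii.gz', 't1_brain.nii.gz', 't1_mask.nii.gz',
                  'qsm.nii.gz', 'midbrain_mask.nii.gz', 'labels.nii.gz')

# 2. Register every atlas instance to the target subject.
mbst.register('target_t1.nii.gz', 'target_t1_brain.nii.gz', 'target_qsm.nii.gz',
              atlas_file, output_dir, clean_up=False, overwrite=False)

# Subject codes inside the atlas file are MBST_000, MBST_001, ...
subject_codes = ['%s%03d' % (mbst.SUBJECT_PREFIX, i) for i in range(len(subjects))]

# 3. Fuse the warped labels into priors, weighting by registration quality.
mbst.fuse_labels(subject_codes, output_dir, 'priors.nii.gz',
                 'average_mask.nii.gz', 'votes.txt')

# 4. Learn spatial relations between neighbouring voxel labels (optional).
mbst.estimate_spatial_relations(subject_codes, output_dir,
                                'spatial_rel.pkl', 'priors.nii.gz')

# 5. Segment the target parametric image (pass '' as spatial_rel_file to skip
#    regularization) and score it against a manual segmentation if available.
mbst.do_segmentation('target_qsm.nii.gz', 'priors.nii.gz', 'average_mask.nii.gz',
                     'segmentation.nii.gz', 'segmentation4D.nii.gz',
                     'spatial_rel.pkl')
scores = mbst.compute_scores('segmentation4D.nii.gz', 'manual_labels.nii.gz',
                             ['dice', 'manual_vol', 'automated_vol'])
print(scores)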
# -*- coding: utf-8 -*- from __future__ import with_statement from beehive.compat import unicode from beehive import model, i18n DEFAULT_LANGUAGE = 'en' def parse_file(filename, language=None): with open(filename, 'rb') as f: # file encoding is assumed to be utf8. Oh, yes. data = f.read().decode('utf8') return parse_feature(data, language, filename) def parse_feature(data, language=None, filename=None): # ALL data operated on by the parser MUST be unicode assert isinstance(data, unicode) try: result = Parser(language).parse(data, filename) except ParserError as e: e.filename = filename raise return result def parse_steps(text, language=None, filename=None): """ Parse a number of steps a multi-line text from a scenario. Scenario line with title and keyword is not provided. :param text: Multi-line text with steps to parse (as unicode). :param language: i18n language identifier (optional). :param filename: Filename (optional). :return: Parsed steps (if successful). """ assert isinstance(text, unicode) try: result = Parser(language, variant='steps').parse_steps(text, filename) except ParserError as e: e.filename = filename raise return result def parse_tags(text): """ Parse tags from text (one or more lines, as string). :param text: Multi-line text with tags to parse (as unicode). :return: List of tags (if successful). """ # assert isinstance(text, unicode) if not text: return [] return Parser().parse_tags(text) class ParserError(Exception): def __init__(self, message, line, filename=None, line_text=None): if line: message += ' at line %d' % line if line_text: message += ": '%s'" % line_text.strip() super(ParserError, self).__init__(message) self.line = line self.line_text = line_text self.filename = filename def __str__(self): if self.filename: return 'Failed to parse "%s": %s' % (self.filename, self.args[0]) return 'Failed to parse <string>: %s' % self.args[0] class Parser(object): # pylint: disable=W0201,R0902 # W0201 Attribute ... defined outside __init__() method => reset() # R0902 Too many instance attributes (15/10) def __init__(self, language=None, variant=None): if not variant: variant = 'feature' self.language = language self.variant = variant self.reset() def reset(self): # This can probably go away. if self.language: self.keywords = i18n.languages[self.language] else: self.keywords = None self.state = 'init' self.line = 0 self.last_step = None self.multiline_start = None self.multiline_leading = None self.multiline_terminator = None self.filename = None self.feature = None self.statement = None self.tags = [] self.lines = [] self.table = None self.examples = None def parse(self, data, filename=None): self.reset() self.filename = filename for line in data.split('\n'): self.line += 1 if not line.strip() and not self.state == 'multiline': # -- SKIP EMPTY LINES, except in multiline string args. 
continue self.action(line) if self.table: self.action_table('') feature = self.feature if feature: feature.parser = self self.reset() return feature def _build_feature(self, keyword, line): name = line[len(keyword) + 1:].strip() self.feature = model.Feature(self.filename, self.line, keyword, name, tags=self.tags) # -- RESET STATE: self.tags = [] def _build_background_statement(self, keyword, line): if self.tags: msg = 'Background supports no tags: @%s' % (' @'.join(self.tags)) raise ParserError(msg, self.line, self.filename, line) name = line[len(keyword) + 1:].strip() statement = model.Background(self.filename, self.line, keyword, name) self.statement = statement self.feature.background = self.statement def _build_scenario_statement(self, keyword, line): name = line[len(keyword) + 1:].strip() self.statement = model.Scenario(self.filename, self.line, keyword, name, tags=self.tags) self.feature.add_scenario(self.statement) # -- RESET STATE: self.tags = [] def _build_scenario_outline_statement(self, keyword, line): # pylint: disable=C0103 # C0103 Invalid name "build_scenario_outline_statement", too long. name = line[len(keyword) + 1:].strip() self.statement = model.ScenarioOutline(self.filename, self.line, keyword, name, tags=self.tags) self.feature.add_scenario(self.statement) # -- RESET STATE: self.tags = [] def _build_examples(self, keyword, line): if not isinstance(self.statement, model.ScenarioOutline): message = 'Examples must only appear inside scenario outline' raise ParserError(message, self.line, self.filename, line) name = line[len(keyword) + 1:].strip() self.examples = model.Examples(self.filename, self.line, keyword, name) # pylint: disable=E1103 # E1103 Instance of 'Background' has no 'examples' member # (but some types could not be inferred). self.statement.examples.append(self.examples) def diagnose_feature_usage_error(self): if self.feature: return "Multiple features in one file are not supported." else: return "Feature should not be used here." def diagnose_background_usage_error(self): if self.feature and self.feature.scenarios: return "Background may not occur after Scenario/ScenarioOutline." elif self.tags: return "Background does not support tags." else: return "Background should not be used here." def diagnose_scenario_usage_error(self): if not self.feature: return "Scenario may not occur before Feature." else: return "Scenario should not be used here." def diagnose_scenario_outline_usage_error(self): if not self.feature: return "ScenarioOutline may not occur before Feature." else: return "ScenarioOutline should not be used here." def ask_parse_failure_oracle(self, line): """ Try to find the failure reason when a parse failure occurs: Oracle, oracle, ... what went wrong? Zzzz :param line: Text line where parse failure occured (as string). :return: Reason (as string) if an explanation is found. Otherwise, empty string or None. """ feature_kwd = self.match_keyword('feature', line) if feature_kwd: return self.diagnose_feature_usage_error() background_kwd = self.match_keyword('background', line) if background_kwd: return self.diagnose_background_usage_error() scenario_kwd = self.match_keyword('scenario', line) if scenario_kwd: return self.diagnose_scenario_usage_error() scenario_outline_kwd = self.match_keyword('scenario_outline', line) if scenario_outline_kwd: return self.diagnose_scenario_outline_usage_error() # -- OTHERWISE: if self.variant == 'feature' and not self.feature: return "No feature found." # -- FINALLY: No glue what went wrong. 
return None def action(self, line): if line.strip().startswith('#') and not self.state == 'multiline': if self.keywords or self.state != 'init' or self.tags: return line = line.strip()[1:].strip() if line.lstrip().lower().startswith('language:'): language = line[9:].strip() self.language = language self.keywords = i18n.languages[language] return func = getattr(self, 'action_' + self.state, None) if func is None: line = line.strip() msg = "Parser in unknown state %s;" % self.state raise ParserError(msg, self.line, self.filename, line) if not func(line): line = line.strip() msg = u"\nParser failure in state %s, at line %d: '%s'\n" % \ (self.state, self.line, line) reason = self.ask_parse_failure_oracle(line) if reason: msg += u"REASON: %s" % reason raise ParserError(msg, None, self.filename) def action_init(self, line): line = line.strip() if line.startswith('@'): self.tags.extend(self.parse_tags(line)) return True feature_kwd = self.match_keyword('feature', line) if feature_kwd: self._build_feature(feature_kwd, line) self.state = 'feature' return True return False def subaction_detect_next_scenario(self, line): if line.startswith('@'): self.tags.extend(self.parse_tags(line)) self.state = 'next_scenario' return True scenario_kwd = self.match_keyword('scenario', line) if scenario_kwd: self._build_scenario_statement(scenario_kwd, line) self.state = 'scenario' return True scenario_outline_kwd = self.match_keyword('scenario_outline', line) if scenario_outline_kwd: self._build_scenario_outline_statement(scenario_outline_kwd, line) self.state = 'scenario' return True # -- OTHERWISE: return False def action_feature(self, line): line = line.strip() if self.subaction_detect_next_scenario(line): return True background_kwd = self.match_keyword('background', line) if background_kwd: self._build_background_statement(background_kwd, line) self.state = 'steps' return True self.feature.description.append(line) return True def action_next_scenario(self, line): """ Entered after first tag for Scenario/ScenarioOutline is detected. """ line = line.strip() if self.subaction_detect_next_scenario(line): return True return False def action_scenario(self, line): """ Entered when Scenario/ScenarioOutline keyword/line is detected. Hunts/collects scenario description lines. DETECT: * first step of Scenario/ScenarioOutline * next Scenario/ScenarioOutline. """ line = line.strip() step = self.parse_step(line) if step: # -- FIRST STEP DETECTED: End collection of scenario descriptions. self.state = 'steps' self.statement.steps.append(step) return True # -- CASE: Detect next Scenario/ScenarioOutline # * Scenario with scenario description, but without steps. # * Title-only scenario without scenario description and steps. if self.subaction_detect_next_scenario(line): return True # -- OTHERWISE: Add scenario description line. # pylint: disable=E1103 # E1103 Instance of 'Background' has no 'description' member... self.statement.description.append(line) return True def action_steps(self, line): """ Entered when first step is detected (or nested step parsing). 
Subcases: * step * multi-line text (doc-string), following a step * table, following a step * examples for a ScenarioOutline, after ScenarioOutline steps DETECT: * next Scenario/ScenarioOutline """ # pylint: disable=R0911 # R0911 Too many return statements (8/6) stripped = line.lstrip() if stripped.startswith('"""') or stripped.startswith("'''"): self.state = 'multiline' self.multiline_start = self.line self.multiline_terminator = stripped[:3] self.multiline_leading = line.index(stripped[0]) return True line = line.strip() step = self.parse_step(line) if step: self.statement.steps.append(step) return True if self.subaction_detect_next_scenario(line): return True examples_kwd = self.match_keyword('examples', line) if examples_kwd: self._build_examples(examples_kwd, line) self.state = 'table' return True if line.startswith('|'): assert self.statement.steps, "TABLE-START without step detected." self.state = 'table' return self.action_table(line) return False def action_multiline(self, line): if line.strip().startswith(self.multiline_terminator): step = self.statement.steps[-1] step.text = model.Text(u'\n'.join(self.lines), u'text/plain', self.multiline_start) if step.name.endswith(':'): step.name = step.name[:-1] self.lines = [] self.multiline_terminator = None self.state = 'steps' return True self.lines.append(line[self.multiline_leading:]) # -- BETTER DIAGNOSTICS: May remove non-whitespace in execute_steps() removed_line_prefix = line[:self.multiline_leading] if removed_line_prefix.strip(): message = "BAD-INDENT in multiline text: " message += "Line '%s' would strip leading '%s'" % \ (line, removed_line_prefix) raise ParserError(message, self.line, self.filename) return True def action_table(self, line): line = line.strip() if not line.startswith('|'): if self.examples: self.examples.table = self.table self.examples = None else: step = self.statement.steps[-1] step.table = self.table if step.name.endswith(':'): step.name = step.name[:-1] self.table = None self.state = 'steps' return self.action_steps(line) cells = [cell.strip() for cell in line.split('|')[1:-1]] if self.table is None: self.table = model.Table(cells, self.line) else: if len(cells) != len(self.table.headings): raise ParserError("Malformed table", self.line) self.table.add_row(cells, self.line) return True def match_keyword(self, keyword, line): if not self.keywords: self.language = DEFAULT_LANGUAGE self.keywords = i18n.languages[DEFAULT_LANGUAGE] for alias in self.keywords[keyword]: if line.startswith(alias + ':'): return alias return False def parse_tags(self, line): ''' Parse a line with one or more tags: * A tag starts with the AT sign. * A tag consists of one word without whitespace chars. * Multiple tags are separated with whitespace chars * End-of-line comment is stripped. :param line: Line with one/more tags to process. :raise ParseError: If syntax error is detected. ''' assert line.startswith('@') tags = [] for word in line.split(): if word.startswith('@'): tags.append(model.Tag(word[1:], self.line)) elif word.startswith('#'): break # -- COMMENT: Skip rest of line. else: # -- BAD-TAG: Abort here. 
raise ParserError("tag: %s (line: %s)" % (word, line), self.line, self.filename) return tags def parse_step(self, line): for step_type in ('given', 'when', 'then', 'and', 'but'): for kw in self.keywords[step_type]: if kw.endswith('<'): whitespace = '' kw = kw[:-1] else: whitespace = ' ' # try to match the keyword; also attempt a purely lowercase # match if that'll work if not (line.startswith(kw + whitespace) or line.lower().startswith(kw.lower() + whitespace)): continue name = line[len(kw):].strip() if step_type in ('and', 'but'): if not self.last_step: raise ParserError("No previous step", self.line) step_type = self.last_step else: self.last_step = step_type step = model.Step(self.filename, self.line, kw, step_type, name) return step return None def parse_steps(self, text, filename=None): """ Parse support for execute_steps() functionality that supports step with: * multiline text * table :param text: Text that contains 0..* steps :return: List of parsed steps (as model.Step objects). """ assert isinstance(text, unicode) if not self.language: self.language = u"en" self.reset() self.filename = filename self.statement = model.Scenario(filename, 0, u"scenario", u"") self.state = 'steps' for line in text.split("\n"): self.line += 1 if not line.strip() and not self.state == 'multiline': # -- SKIP EMPTY LINES, except in multiline string args. continue self.action(line) # -- FINALLY: if self.table: self.action_table("") steps = self.statement.steps return steps
# -*- coding: utf-8 -*- """ Rewriting standard tasks for the NLP pipeline using the ``Stanford CoreNLP`` server. The main motivation to do this lies in the following problem: When the respective Stanford tools are run through their ``NLTK`` wrappers, they load their necessary models from scratch every time. This slows down the pipeline quite a lot. In contrast, the server loads them only once. Also, this approach comes with some other merits as well: * Reducing the number of necessary Stanford model files * Avoiding using bulk operations like ``parse_sents()`` which complicate the current architecture of the pipeline. """ # PROJECT from bwg.mixins import CoreNLPServerMixin from bwg.tasks.naive_ore import ( NaiveOpenRelationExtractionTask ) from bwg.tasks.ner import NERTask from bwg.tasks.pos_tagging import PoSTaggingTask from bwg.tasks.dependency_parsing import DependencyParseTask class ServerNERTask(CoreNLPServerMixin, NERTask): """ Luigi task that performs Named Entity Recognition on a corpus, but using a Stanford CoreNLP server. """ def _ner_tag(self, sentence_data, **workflow_resources): """ Tag a single sentence with named entities using a Stanford CoreNLP server. :param sentence_data: Data of the sentence that is going to be named entity tagged. :type sentence_data: dict :param workflow_resources: Additional resources for this step. :type workflow_resources: dict :return: Processed sentence. :rtype: dict """ return self.process_sentence_with_corenlp_server( sentence_data, action="ner", postprocessing_func=self._postprocess_ner_tagged ) def _postprocess_ner_tagged(self, result_json): """ Apply a bit of postprocessing to the tagged data (mainly to be consistent with the taggers output if you don't use CoreNLP server). :param result_json: Processed sentence data. :type result_json: dict :return: Sentence data as a list of tuples. :rtype: list """ if len(result_json["sentences"]) == 0: return [] token_dicts = result_json["sentences"][0]["tokens"] return [ (token_dict["word"], token_dict["ner"]) for token_dict in token_dicts ] class ServerDependencyParseTask(CoreNLPServerMixin, DependencyParseTask): """ Luigi task that dependency-parses sentences in a corpus, but using a Stanford CoreNLP server. """ def _dependency_parse(self, sentence_data, **workflow_resources): """ Dependency parse a sentence using a Stanford CoreNLP server. :param sentence_data: Data of the sentence that is going to be dependency parsed. :type sentence_data: dict :param workflow_resources: Additional resources for this step. :type workflow_resources: dict :return: Processed sentence. :rtype: dict """ return self.process_sentence_with_corenlp_server( sentence_data, action="depparse", postprocessing_func=self._postprocess_dependency_parsed, ) def _postprocess_dependency_parsed(self, result_json): """ Apply a bit of postprocessing to the parsed data (mainly to be consistent with the taggers output if you don't use the `CoreNLP server``). :param result_json: Processed sentence data. :type result_json: dict :return: Dependency parse as dictionary. :rtype: dict """ if len(result_json["sentences"]) == 0: return [] edges = result_json["sentences"][0]["basicDependencies"] return self.edges_to_nodes(edges) def edges_to_nodes(self, edges): """ Turn a dependency tree representation based on nodes to a representation based on edges. :param edges: List of tree edges. :type edges: list :return: Dictionary with the root node and a list of all nodes. 
:rtype: dict """ nodes = {} for edge in edges: # Create not if they don't exist governing_address = edge["governor"] if governing_address not in nodes: nodes[governing_address] = self._create_node(governing_address, edge["governorGloss"], None) dependent_address = edge["dependent"] if dependent_address not in nodes: nodes[dependent_address] = self._create_node(dependent_address, edge["dependentGloss"], edge["dep"]) elif nodes[dependent_address]["rel"] is None: nodes[dependent_address]["rel"] = edge["dep"] # Create connections if they don't exist if dependent_address not in nodes[governing_address]["deps"]: if edge["dep"] in nodes[governing_address]["deps"]: nodes[governing_address]["deps"][edge["dep"]].append(dependent_address) else: nodes[governing_address]["deps"][edge["dep"]] = [dependent_address] root = [node for node_address, node in nodes.items() if node["rel"] == "ROOT"][0] return { "nodes": nodes, "root": root } @staticmethod def _create_node(node_address, node_gloss, node_rel): """ Create a dictionary representation of a dependency tree node. :param node_address: Node number. :type node_address: int :param node_gloss: Word of this node. :type node_gloss: str :param node_rel: Relation of this node to its head. :type node_rel: str, None :return: Dictionary node representation. :rtype: dict """ return { "address": node_address, "word": node_gloss, "rel": node_rel, "deps": {} } class ServerPoSTaggingTask(CoreNLPServerMixin, PoSTaggingTask): """ Luigi task that PoS tags a sentence in a corpus, but using a Stanford CoreNLP server. """ def _pos_tag(self, sentence_data, **workflow_resources): """ Tag a single sentence with Part-of-Speech tags using a Stanford CoreNLP server. :param sentence_data: Data of the sentence that is going to be pos tagged. :type sentence_data: dict :param workflow_resources: Additional resources for this step. :type workflow_resources: dict :return: Processed sentence. :rtype: dict """ return self.process_sentence_with_corenlp_server( sentence_data, action="pos", postprocessing_func=self._postprocess_pos_tagged, ) def _postprocess_pos_tagged(self, result_json): """ Apply a bit of postprocessing to the parsed data (mainly to be consistent with the taggers output if you don't use CoreNLP server). :param result_json: Processed sentence data. :type result_json: dict :return: Dependency parse as dictionary. :rtype: dict """ if len(result_json["sentences"]) == 0: return [] token_dicts = result_json["sentences"][0]["tokens"] return [ (token_dict["word"], token_dict["pos"]) for token_dict in token_dicts ] class ServerNaiveOpenRelationExtractionTask(NaiveOpenRelationExtractionTask): """ Luigi task that performs Open Relation extraction on a corpus. The only adjustment in this case are the requirements for this task, this task doesn't use the CoreNLP server at all. """ def requires(self): return ServerNERTask(task_config=self.task_config),\ ServerDependencyParseTask(task_config=self.task_config),\ ServerPoSTaggingTask(task_config=self.task_config)
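# ---------------------------------------------------------------------------
# Illustration (not part of the tasks above): the shape of the JSON payload a
# CoreNLP server returns and that the _postprocess_* helpers consume. The
# sample sentence is made up; the transformation simply mirrors
# ServerNERTask._postprocess_ner_tagged as a free function so it can be tried
# without constructing a Luigi task_config.
_SAMPLE_CORENLP_JSON = {
    "sentences": [
        {
            "tokens": [
                {"word": "Ada", "ner": "PERSON", "pos": "NNP"},
                {"word": "wrote", "ner": "O", "pos": "VBD"},
                {"word": "programs", "ner": "O", "pos": "NNS"},
            ]
        }
    ]
}


def _demo_postprocess_ner(result_json):
    """Return (word, ner_tag) tuples from a CoreNLP server response."""
    if len(result_json["sentences"]) == 0:
        return []
    token_dicts = result_json["sentences"][0]["tokens"]
    return [(token["word"], token["ner"]) for token in token_dicts]

# _demo_postprocess_ner(_SAMPLE_CORENLP_JSON)
# -> [('Ada', 'PERSON'), ('wrote', 'O'), ('programs', 'O')]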
#!/usr/bin/python ## Some info should go here. #TODO: add these features: # [ ] works in login window # [x] mirroring # [x] brightness settings # [ ] HDMI overscan # [ ] AirPlay mirroring # [x] set individual display settings # [x] HiDPI/no HiDPI options ## Imports import argparse import objc import os import sys import CoreFoundation import Quartz ## Global Variables attributes = { 'long_name' : 'Display Manager', 'name' : os.path.basename(sys.argv[0]), 'version' : '0.9.3' } kMaxDisplays = 32 ## Manual metadata loading. def initialize_iokit_functions_and_variables(): """ This handles the importing of specific functions and variables from the IOKit framework. IOKit is not natively bridged in PyObjC, and so the methods must be found and encoded manually to gain their functionality in Python. After calling this function, the following IOKit functions are available: IOServiceGetMatchingServices Look up the registered IOService objects that match the given dict. IODisplayCreateInfoDictionary Returns a dictionary with information about display hardware. IODisplayGetFloatParameter Finds a float value for a given parameter. IODisplaySetFloatParameter Sets a float value for a given parameter. IOServiceMatching Returns a dictionary that specifies an IOService class match. IOIteratorNext Finds the next object in an iteration. And the following variables are available: kIODisplayNoProductName Prevents IODisplayCreateInfoDictionary from including the kIODisplayProductName property. kIOMasterPortDefault The default mach port used to initiate communication with IOKit. kIODisplayBrightnessKey The key used to get brightness from IODisplayGetFloatParameter. kDisplayVendorID kDisplayProductID kDisplaySerialNumber These are keys used to access display information. """ # Grab the IOKit framework. iokit = objc.initFrameworkWrapper( "IOKit", frameworkIdentifier="com.apple.iokit", frameworkPath=objc.pathForFramework("/System/Library/Frameworks/IOKit.framework"), globals=globals() ) # These are the functions we're going to need. functions = [ ("IOServiceGetMatchingServices", b"iI@o^I"), ("IODisplayCreateInfoDictionary", b"@II"), ("IODisplayGetFloatParameter", b"iII@o^f"), ("IODisplaySetFloatParameter", b"iII@f"), ("IOServiceMatching", b"@or*", "", dict( # This one is obnoxious. The "*" gets pythonified as a char, not a # char*, so we have to make it interpret as a string. arguments= { 0: dict(type=objc._C_PTR + objc._C_CHAR_AS_TEXT, c_array_delimited_by_null=True, type_modifier=objc._C_IN) } )), ("IOIteratorNext", "II"), ] # Variables we'll need. variables = [ ("kIODisplayNoProductName", b"I"), ("kIOMasterPortDefault", b"I"), ("kIODisplayBrightnessKey", b"*"), ("kDisplayVendorID", b"*"), ("kDisplayProductID", b"*"), ("kDisplaySerialNumber", b"*"), ] # Load the things! objc.loadBundleFunctions(iokit, globals(), functions) objc.loadBundleVariables(iokit, globals(), variables) # Set this key for later use. global kDisplayBrightness kDisplayBrightness = CoreFoundation.CFSTR(kIODisplayBrightnessKey) def CFNumberEqualsUInt32(number, uint32): """ Determines whether a number and a uint32_t are equivalent. :param number: The returned result from a CFDictionaryGetValue call. This call can return "None", which does not match exactly with "0". :param uint32: The result from Quartz library calls for display information. This is an integer of sorts. :return: A boolean; whether the two values are equivalent. 
""" if number is None: return uint32 == 0 return number == uint32 def CGDisplayGetIOServicePort(display): """ Since CGDisplayIOServicePort was deprecated in 10.9, we have to rebuild the equivalent function. This is effectively taken from: https://github.com/nriley/brightness/blob/master/brightness.c :param display: A display identifier. :return: The integer value of the matching service port (or 0 if none can be found). """ # Get values from current display. vendor = Quartz.CGDisplayVendorNumber(display) model = Quartz.CGDisplayModelNumber(display) serial = Quartz.CGDisplaySerialNumber(display) # Get matching service name. matching = IOServiceMatching("IODisplayConnect") # Get the iterator for all service ports. error, iterator = IOServiceGetMatchingServices(kIOMasterPortDefault, matching, None) if error: # Did we get an error? return 0 # Begin iteration. service = IOIteratorNext(iterator) matching_service = 0 while service != 0: # Until we find the desired service, keep iterating. # Get the information for the current service. info = IODisplayCreateInfoDictionary(service, kIODisplayNoProductName) # Get the vendor ID, product ID, and serial number. vendorID = CoreFoundation.CFDictionaryGetValue(info, CoreFoundation.CFSTR(kDisplayVendorID)) productID = CoreFoundation.CFDictionaryGetValue(info, CoreFoundation.CFSTR(kDisplayProductID)) serialNumber = CoreFoundation.CFDictionaryGetValue(info, CoreFoundation.CFSTR(kDisplaySerialNumber)) # Check if everything matches. if ( CFNumberEqualsUInt32(vendorID, vendor) and CFNumberEqualsUInt32(productID, model) and CFNumberEqualsUInt32(serialNumber, serial) ): # If it does, then we've found our service port, so break out. matching_service = service break # Otherwise, keep searching. service = IOIteratorNext(iterator) # Return what we've found. return matching_service ## Struct-like thing for easy display. class DisplayMode(object): """ This class describes a display mode, at least as I like to look at them. It has width, height, bits per pixel (pixel encoding), and refresh rate. It can also have the raw mode (if it's based on a real mode from the system) and a HiDPI scalar value (if the mode represents a scaled mode). This also calculates the total pixel count (width * height) and the display ratio (width / height). These are used to help with matching of different display modes with similar properties. 
""" def __init__(self, mode=None, width=None, height=None, bpp=None, refresh=None): if mode: self.width = Quartz.CGDisplayModeGetWidth(mode) self.height = Quartz.CGDisplayModeGetHeight(mode) self.bpp = get_pixel_depth_from_encoding(Quartz.CGDisplayModeCopyPixelEncoding(mode)) self.refresh = Quartz.CGDisplayModeGetRefreshRate(mode) self.raw_mode = mode self.dpi_scalar = get_hidpi_scalar(mode) else: for element in [width, height, bpp, refresh]: if element is None: raise ValueError("Must give all of width, height, bits per pixel, and refresh rate to construct DisplayMode.") self.width = width self.height = height self.bpp = bpp self.refresh = refresh self.raw_mode = None self.dpi_scalar = None self.pixels = self.width * self.height self.ratio = float(self.width) / float(self.height) def __str__(self): return "{width}x{height}; pixel depth: {bpp}; refresh rate: {refresh}; ratio: {ratio:.2f}:1{hidpi}".format( width = self.width, height = self.height, bpp = self.bpp, refresh = self.refresh, ratio = self.ratio, hidpi = "; [HiDPI: x{}]".format(self.dpi_scalar) if self.dpi_scalar else "" ) def __repr__(self): return self.__str__() def __eq__(self, other): return ( isinstance(other, self.__class__) and self.width == other.width and self.height == other.height and self.bpp == other.bpp and self.refresh == other.refresh and self.dpi_scalar == other.dpi_scalar ) def __ne__(self, other): return not self.__eq__(other) ## Helper Functions def get_hidpi_scalar(mode): """ Uses extra methods to find the HiDPI scalar for a display. :param mode: The raw mode from the Quartz library for the display. :return: Either None if there is no scaling, or else the value of the scaling scalar. """ raw_width = Quartz.CGDisplayModeGetPixelWidth(mode) raw_height = Quartz.CGDisplayModeGetPixelHeight(mode) res_width = Quartz.CGDisplayModeGetWidth(mode) res_height = Quartz.CGDisplayModeGetHeight(mode) if raw_width == res_width and raw_height == res_height: return None else: if raw_width / res_width != raw_height / res_height: raise RuntimeError("Vertical and horizontal dimensions aren't scaled properly... mode: {}".format(mode)) return raw_width / res_width def get_hidpi_value(no_hidpi, only_hidpi): """ Returns a numeric value describing the HiDPI mode desired. :param no_hidpi: Whether to exclude HiDPI modes from the search. :param only_hidpi: Whether to only include HiDPI modes. :return: An integer describing the combination of these. """ if no_hidpi and not only_hidpi: return 0 elif not no_hidpi and not only_hidpi: return 1 elif not no_hidpi and only_hidpi: return 2 else: raise ValueError("Error: Cannot require both no HiDPI and only HiDPI. Make up your mind.") def get_pixel_depth_from_encoding(encoding): """ Takes a pixel encoding and returns an integer representing that encoding. :param encoding: A pixel encoding from Quartz's method CGDisplayModeCopyPixelEncoding. :return: An integer representing the pixel depth of that encoding. """ if encoding == "PPPPPPPP": return 8 elif encoding == "-RRRRRGGGGGBBBBB": return 16 elif encoding == "--------RRRRRRRRGGGGGGGGBBBBBBBB": return 32 elif encoding == "--RRRRRRRRRRGGGGGGGGGGBBBBBBBBBB": return 30 else: raise RuntimeError("Unknown pixel encoding: {}".format(encoding)) def get_displays_list(): """ Gets a list of all current displays. :return: A tuple containing all currently-online displays. Each object in the tuple is a display identifier (as an integer). 
""" (error, online_displays, displays_count) = Quartz.CGGetOnlineDisplayList(kMaxDisplays, None, None) if error: raise RuntimeError("Unable to get displays list.") return online_displays def get_all_modes_for_display(display, hidpi=1): """ Given a display, this finds all of the available supported display modes. The resulting list is sorted so that the highest resolution, highest pixel depth, highest refresh rate modes are at the top. :param display: The identifier of the desired display. :param hidpi: The HiDPI usage mode, specified by get_hidpi_value(). :return: A list of DisplayMode objects, sorted. """ #TODO: The HiDPI call also gets extra things. Fix those. # Specifically, it includes not just HiDPI settings, but also settings for # different pixel encodings than the standard 8-, 16-, and 32-bit. # Unfortunately, the CGDisplayModeCopyPixelEncoding method cannot see other # encodings, leading to apparently-duplicated values. Not ideal. if hidpi: modes = [DisplayMode(mode=mode) for mode in Quartz.CGDisplayCopyAllDisplayModes(display, {Quartz.kCGDisplayShowDuplicateLowResolutionModes: Quartz.kCFBooleanTrue})] else: modes = [DisplayMode(mode=mode) for mode in Quartz.CGDisplayCopyAllDisplayModes(display, None)] if hidpi == 2: # This removes extra modes, so only HiDPI-scaled modes are displayed. modes = [mode for mode in modes if mode.dpi_scalar is not None] # Sort the modes! modes.sort(key = lambda mode: mode.refresh, reverse = True) modes.sort(key = lambda mode: mode.bpp, reverse = True) modes.sort(key = lambda mode: mode.pixels, reverse = True) return modes def get_current_mode_for_display(display): """ Gets the current display mode for a given display. :param: The identifier of the desired display. :return: The current DisplayMode used by that display. """ return DisplayMode(mode=Quartz.CGDisplayCopyDisplayMode(display)) def get_all_current_configurations(): """ Gets a list of all displays and their associated current display modes. :return: A list of tuples as: (display identifier, [current DisplayMode for that display]) """ modes = [] for display in get_displays_list(): modes.append( (display, get_current_mode_for_display(display)) ) return modes def get_all_modes_for_all_displays(hidpi=1): """ Gets a list of displays and all available modes for each display. :param hidpi: Whether to include additional, "duplicate" modes. :return: A list of tuples as: (display identifier, [all valid DisplayModes for that display]) """ modes = [] for display in get_displays_list(): modes.append( (display, get_all_modes_for_display(display, hidpi)) ) return modes def get_mode_closest_to_values(modes, width, height, depth, refresh): """ Given a set of values and a list of available modes, attempts to find the closest matching supported mode in the list. "Closest matching" is determined as follows: 1. Is the desired mode in the list exactly? 2. If not, is there a mode with the desired resolution and bit depth? a. Find the closest matching refresh rate available. 3. If not, is there a mode with the desired resolution? a. Find the closest matching bit depth available. 4. If not, is there a mode with the desired Width:Height ratio? a. Find the closest resolution available. 5. If not... a. Find the closest aspect ratio. b. Find the closest resolution. 6. If there is still nothing, return None. :param modes: A list containing DisplayMode objects. :param width: An integer for display width. :param height: An integer for display height. :param depth: The pixel depth for the display. 
:param refresh: The refresh rate for the display. :return: The DisplayMode from the list that matches the values best. """ # Check we don't have an empty list of modes. if not modes: return None match_mode = DisplayMode(width=width, height=height, bpp=depth, refresh=refresh) match_ratio = width / height match_pixels = width * height # Search for an exact match. for mode in modes: if mode == match_mode: return mode # No exact match, so let's check if there's a resolution and bit depth match. close_matches = [] for mode in modes: if mode.width == width and mode.height == height and mode.bpp == depth: # Found one with the correct resolution. close_matches.append(mode) if close_matches: # There's at least one match at the correct resolution and bit depth. close_matches.sort(key = lambda mode: mode.refresh, reverse = True) larger = None smaller = None # Find the two closest matches by refresh rate. for match in close_matches: if match.refresh > refresh: larger = match else: smaller = match break # Check some edge cases. if smaller and not larger: # All the available refresh rates are lesser than the desired. return smaller if larger and not smaller: # There's only one element in the list, and it's larger than we # ideally wanted. Oh well. return larger # Okay, now we have two elements, and neither is perfect. # Find the closer of the two. larger_dif = abs(larger.refresh - refresh) smaller_dif = abs(smaller.refresh - refresh) if smaller_dif < larger_dif: return smaller else: return larger # No matches for WHD, so let's check that bit depth. close_matches = [] for mode in modes: if mode.width == width and mode.height == height: # Found one with the right resolution. close_matches.append(mode) if close_matches: # We have the correct resolution. Let's find the closest bit depth. close_matches.sort(key = lambda mode: mode.bpp, reverse = True) larger = None smaller = None # Find the two closest matches by bit depth. for match in close_matches: if match.bpp > depth: larger = match else: smaller = match break # Check some edge cases. if smaller and not larger: # All the available bit depths are lesser than the desired. return smaller if larger and not smaller: # There's only one element in the list, and it's larger than we # ideally wanted. Oh well. return larger # Okay, now we have two elements, and neither is perfect. # Find the closer of the two. larger_dif = abs(larger.bpp - depth) smaller_dif = abs(smaller.bpp - depth) if smaller_dif < larger_dif: return smaller else: return larger # At this point, we don't even have a good resolution match. # Let's find all the modes with the appropriate ratio, and then find the # closest total pixel count. close_matches = [] for mode in modes: if mode.ratio == match_ratio: # Got the right width:height ratio. close_matches.append(mode) if close_matches: # Sort by total pixels. close_matches.sort(key = lambda mode: mode.pixels, reverse = True) larger = None smaller = None # Find the closest matches by pixel count. for match in close_matches: if match.pixels > match_pixels: larger = match else: smaller = match break # Check some edge cases. if smaller and not larger: # All the available pixel counts are lesser than the desired. return smaller if larger and not smaller: # There's only one element in the list, and it's larger than we # ideally wanted. Oh well. return larger # Okay, now we have two elements, and neither is perfect. # Find the closer of the two. 
larger_dif = abs(larger.pixels - match_pixels) smaller_dif = abs(smaller.pixels - match_pixels) if smaller_dif < larger_dif: return smaller else: return larger # Still no good matches. Okay, now we're really reaching. # Let's try to find all of the displays with a sort-of-close aspect ratio, # and then find the one in there that has the closest total pixel count. ratios = [] for mode in modes: ratios.append(mode.ratio) ratios = list(set(ratios)) ratios.sort(reverse = True) larger_ratio = None smaller_ratio = None ideal_ratio = None for ratio in ratios: if ratio > match_ratio: larger_ratio = ratio else: smaller_ratio = ratio break if smaller_ratio and not larger_ratio: ideal_ratio = smaller_ratio elif larger_ratio and not smaller_ratio: ideal_ratio = larger_ratio else: larger_dif = abs(larger_ratio - match_ratio) smaller_dif = abs(smaller_ratio - match_ratio) if smaller_dif < larger_dif: ideal_ratio = smaller_ratio else: ideal_ratio = larger_ratio # Now find all the matches with the ideal ratio. close_matches = [] for mode in modes: if mode.ratio == ideal_ratio: close_matches.append(mode) # And now we look through those for the closest match in pixel count. if close_matches: # Sort by total pixels. close_matches.sort(key = lambda mode: mode.pixels, reverse = True) larger = None smaller = None # Find the closest matches by pixel count. for match in close_matches: if match.pixels > match_pixels: larger = match else: smaller = match break # Check some edge cases. if smaller and not larger: # All the available pixel counts are lesser than the desired. return smaller if larger and not smaller: # There's only one element in the list, and it's larger than we # ideally wanted. Oh well. return larger # Okay, now we have two elements, and neither is perfect. # Find the closer of the two. larger_dif = abs(larger.pixels - match_pixels) smaller_dif = abs(smaller.pixels - match_pixels) if smaller_dif < larger_dif: return smaller else: return larger # We don't have any good resolutions available. Let's throw an error? return None def set_display(display, mode, mirroring=False, mirror_display=Quartz.CGMainDisplayID()): """ Sets a display to a given configuration. :param display: The identifier of the desired display. :param mode: The DisplayMode to set the display to. :param mirroring: Whether to activate mirroring. :param mirror_display: The identifier of the display to mirror. """ print("Setting display {} to mode: {}".format(display, mode)) # Begin the configuration. (error, config_ref) = Quartz.CGBeginDisplayConfiguration(None) # Check there were no errors. if error: print("Could not begin display configuration: error {}".format(error)) sys.exit(8) # Enact the desired configuration. error = Quartz.CGConfigureDisplayWithDisplayMode(config_ref, display, mode.raw_mode, None) # Were there errors? if error: print("Failed to set display configuration: error {}".format(error)) # Yeah, there were errors. Let's cancel the configuration. error = Quartz.CGCancelDisplayConfiguration(config_ref) if error: # Apparently this can fail too? Huh. print("Failed to cancel display configuraiton setting: error {}".format(error)) sys.exit(9) # Did we want mirroring enabled? if mirroring: # Yes, so let's turn it on! We mirror the specified display. if display != mirror_display: Quartz.CGConfigureDisplayMirrorOfDisplay(config_ref, display, mirror_display) else: # I guess not. Don't mirror anything! Quartz.CGConfigureDisplayMirrorOfDisplay(config_ref, display, Quartz.kCGNullDirectDisplay) # Finish the configuration. 
Quartz.CGCompleteDisplayConfiguration(config_ref, Quartz.kCGConfigurePermanently) def sub_set(command, width, height, depth, refresh, display=None, hidpi=1): """ Handles all of the options for the "set" subcommand. :param command: The command passed in. :param width: Desired width. :param height: Desired height. :param depth: Desired pixel depth. :param refresh: Desired refresh rate. :param display: Specific display to configure. :param hidpi: Description of HiDPI settings from get_hidpi_value(). """ # Get the main display's identifier (since it gets used a lot). main_display = Quartz.CGMainDisplayID() # Iterate over the supported commands. if command == "closest": # Find the closest matching configuration and apply it. for element in [width, height, depth, refresh]: # Make sure they supplied all of the necessary info. if element is None: usage("set") print("Must have all of (width, height, depth, refresh) for closest setting.") sys.exit(2) all_modes = get_all_modes_for_all_displays(hidpi) # They only wanted to set one display. if display: all_modes = [x for x in all_modes if x[0] == display] if not all_modes: print("No matching displays found.") sys.exit(4) print("Setting closest supported display configuration(s).") # Inform what's going on. print("Setting for: {width}x{height} ({ratio:.2f}:1); {bpp} bpp; {refresh} Hz".format( width = width, height = height, ratio = float(width) / float(height), bpp = depth, refresh = refresh )) print('-' * 80) # Make it so! for pair in all_modes: print("Display: {}{}".format(pair[0], " (Main Display)" if pair[0] == main_display else "")) closest = get_mode_closest_to_values(pair[1], width, height, depth, refresh) if closest: print(" {}".format(closest)) set_display(pair[0], closest) else: print(" (no close matches found)") elif command == "highest": # Find the highest display mode and set it. all_modes = get_all_modes_for_all_displays(hidpi) # They only wanted to set one display. if display: all_modes = [x for x in all_modes if x[0] == display] if not all_modes: print("No matching displays found.") sys.exit(4) print("Setting highest supported display configuration(s).") print('-' * 80) for pair in all_modes: # This uses the first mode in the all_modes list, because it is # guaranteed that the list is sorted. print("Display: {}{}".format(pair[0], " (Main Display)" if pair[0] == main_display else "")) print(" {}".format(pair[1][0])) set_display(pair[0], pair[1][0]) elif command == "exact": # Set the exact mode or don't set it at all. all_modes = get_all_modes_for_all_displays(hidpi) # Create a fake exact mode to match against. exact = DisplayMode( width = width, height = height, bpp = depth, refresh = refresh ) # They only wanted to set one display. if display: all_modes = [x for x in all_modes if x[0] == display] if not all_modes: print("No matching displays found.") sys.exit(4) print("Setting exact mode or quitting.") print('-' * 80) for pair in all_modes: print("Display: {}{}".format(pair[0], " (Main Display)" if pair[0] == main_display else "")) closest = get_mode_closest_to_values(pair[1], width, height, depth, refresh) if closest and closest == exact: print(" {}".format(closest)) set_display(pair[0], closest) else: print(" (no exact matches found)") def sub_show(command, width, height, depth, refresh, display=None, hidpi=1): """ Handles all the options for the "show" subcommand. :param command: The command passed in. :param width: Desired width. :param height: Desired height. :param depth: Desired pixel depth. :param refresh: Desired refresh rate. 
:param display: Specific display to configure. :param hidpi: Description of HiDPI settings from get_hidpi_value(). """ # Get the main display's identifier since it gets used a lot. main_display = Quartz.CGMainDisplayID() # Iterate over the supported commands. if command == "all": # Show all the modes. all_modes = get_all_modes_for_all_displays(hidpi) # Check if they only wanted to show one display's configuration. if display: all_modes = [x for x in all_modes if x[0] == display] if not all_modes: print("No matching displays found ({}).".format(display)) sys.exit(4) print("Showing all possible display configurations.") print('-' * 80) for pair in all_modes: print("Display: {}{}".format(pair[0], " (Main Display)" if pair[0] == main_display else "")) for mode in pair[1]: print(" {}".format(mode)) elif command == "closest": # Only show the closest mode to whatever was specified. for element in [width, height, depth, refresh]: if element is None: usage("show") print("Must have all of (width, height, depth, refresh) for closest matching.") sys.exit(2) all_modes = get_all_modes_for_all_displays(hidpi) # They only wanted to show one display's configuration. if display: all_modes = [x for x in all_modes if x[0] == display] if not all_modes: print("No matching displays found ({}).".format(display)) sys.exit(4) print("Finding closest supported display configuration(s).") # Inform what's going on. print("Searching for: {width}x{height} ({ratio:.2f}:1); {bpp} bpp; {refresh} Hz".format( width = width, height = height, ratio = float(width) / float(height), bpp = depth, refresh = refresh )) print('-' * 80) for pair in all_modes: print("Display: {}{}".format(pair[0], " (Main Display)" if pair[0] == main_display else "")) closest = get_mode_closest_to_values(pair[1], width, height, depth, refresh) if closest: print(" {}".format(closest)) else: print(" (no close matches found)") elif command == "highest": # Show the highest supported display configuration. all_modes = get_all_modes_for_all_displays(hidpi) if display: all_modes = [x for x in all_modes if x[0] == display] if not all_modes: print("No matching displays found ({}).".format(display)) sys.exit(4) print("Showing highest supported display configuration(s).") print('-' * 80) for pair in all_modes: print("Display: {}{}".format(pair[0], " (Main Display)" if pair[0] == main_display else "")) print(" {}".format(pair[1][0])) elif command == "exact": # Show the current display configuration. current_modes = get_all_current_configurations() if display: current_modes = [x for x in current_modes if x[0] == display] if not current_modes: print("No matching displays found ({}).".format(display)) sys.exit(4) print("Showing current display configuration(s).") print('-' * 80) for pair in current_modes: print("Display: {}".format(pair[0])) print(" {}".format(pair[1])) elif command == "displays": # Print a list of online displays. for display in get_displays_list(): print("Display: {}{}".format(display, " (Main Display)" if display == main_display else "")) def sub_brightness(command, brightness=1, display=None): """ Handles all the options for the "brightness" subcommand. :param command: The command passed in. :param brightness: The level of brightness to change to. :param display: Specific display to configure. """ main_display = Quartz.CGMainDisplayID() # We need extra IOKit stuff for this. initialize_iokit_functions_and_variables() if not display: displays = get_displays_list() else: displays = [display] # Iterate over the available options. 
if command == "show": # Show the current brightness setting. print("Showing current brightness setting(s).") print('-' * 80) for display in displays: service = CGDisplayGetIOServicePort(display) (error, display_brightness) = IODisplayGetFloatParameter(service, 0, kDisplayBrightness, None) if error: print("Failed to get brightness of display {}; error {}".format(display, error)) continue print("Display: {}{}".format(display, " (Main Display)" if display == main_display else "")) print(" {:.2f}%".format(display_brightness * 100)) elif command == "set": # Set the brightness setting. print("Setting display brightness to {:.2f}%".format(brightness * 100)) print('-' * 80) for display in displays: service = CGDisplayGetIOServicePort(display) error = IODisplaySetFloatParameter(service, 0, kDisplayBrightness, brightness) if error: print("Failed to set brightness of display {}; error {}".format(display, error)) continue print("Display: {}{}".format(display, " (Main Display)" if display == main_display else "")) print(" {:.2f}%".format(brightness * 100)) def sub_mirroring(command, display, display_to_mirror=Quartz.CGMainDisplayID()): """ Handles all the options for the "mirroring" subcommand. :param command: The command passed in. :param display: The display to configure mirroring on. :param display_to_mirror: The display to become a mirror of. """ main_display = Quartz.CGMainDisplayID() if not display: displays = get_displays_list() else: displays = [display] # Get the current modes for each display. modes = [] for display in displays: modes.append( (display, get_current_mode_for_display(display)) ) # If we're disabling, then set the mirror target to the null display. if command == "enable": enable_mirroring = True print("Enabling mirroring with target display: {}{}".format(display_to_mirror, " (Main Display)" if display_to_mirror == main_display else "")) if command == "disable": print("Disabling mirroring.") display_to_mirror = Quartz.kCGNullDirectDisplay enable_mirroring = False print('-' * 80) # Effect the changes! for display, mode in modes: print("Display: {}{}".format(display, " (Main Display)" if display == main_display else "")) set_display(display, mode, enable_mirroring, display_to_mirror) ## Helpful command-line information def version(): """ :return: A string containing the version information for this program. """ return ("{name}, version {version}\n".format( name = attributes['long_name'], version = attributes['version'] )) def usage(command=None): """ Prints out the usage information. :param command: The subcommand to print information for. """ # Give the version information always. print(version()) information = {} information['set'] = '''\ usage: {name} set {{ help | closest | highest | exact }} [-w width] [-h height] [-d depth] [-r refresh] [--display display] [--nohidpi] SUBCOMMANDS help Print this help information. closest Set the display settings to the supported resolution that is closest to the specified values. highest Set the display settings to the highest supported resolution. exact Set the display settings to the specified values if they are supported. If they are not, don't change the display. OPTIONS -w width Resolution width. -h height Resolution height. -d depth Color depth. -r refresh Refresh rate. --display display Specify a particular display. --no-hidpi Don't show HiDPI settings. --only-hidpi Only show HiDPI settings. 
'''.format(name=attributes['name']) information['show'] = '''\ usage: {name} show {{ help | all | closest | highest | exact }} [-w width] [-h height] [-d depth] [-r refresh] [--display display] [--nohidpi] SUBCOMMANDS help Print this help information. all Show all supported resolutions for the display. closest Show the closest matching supported resolution to the specified values. highest Show the highest supported resolution. exact Show the current display configuration. displays Just list the current displays and their IDs. OPTIONS -w width Resolution width. -h height Resolution height. -d depth Color depth. -r refresh Refresh rate. --display display Specify a particular display. --no-hidpi Don't show HiDPI settings. --only-hidpi Only show HiDPI settings. '''.format(name=attributes['name']) information['brightness'] = '''\ usage: {name} brightness {{ help | show }} [--display display] SUBCOMMANDS help Print this help information. show Show the current brightness setting(s). OPTIONS --display display Specify a particular display. '''.format(name=attributes['name']) information['mirroring'] = '''\ usage: {name} brightness {{ help | enable | disable }} [--diplay display] [--mirror-of-display display] SUBCOMMANDS help Print this help information. enable Activate mirroring. disable Deactivate mirroring. OPTIONS --display display Change mirroring settings for 'display'. --mirror-of-display display Set the display to mirror 'display'. '''.format(name=attributes['name']) if command in information: print(information[command]) else: print('''\ usage: {name} {{ help | version | set | show | mirroring | brightness }} Use any of the subcommands with 'help' to get more information: help Print this help information. version Print the version information. set Set the display configuration. show See available display configurations. mirroring Set mirroring configuration. brightness See or set the current display brightness. ''').format(name=attributes['name']) class ArgumentParser(argparse.ArgumentParser): """ Custom argument parser for printing error messages a bit more nicely. """ def error(self, message): print("Error: {}\n".format(message)) usage() self.exit(2) if __name__ == '__main__': # If they don't give any arguments, help them out. if len(sys.argv) < 2: usage() sys.exit(1) if len(sys.argv) == 2 and sys.argv[1] == '--help': usage() sys.exit(0) # Do actual argument parsing. parser = ArgumentParser(add_help=False) parser.add_argument('-v', '--version', action='store_true') # Check whether user wanted version information. # Print the version information and quit. args = parser.parse_known_args() if args[0].version: print(version()) sys.exit(0) # Add the subparsers. subparsers = parser.add_subparsers(dest='subcommand') # Subparser for 'version'. parser_version = subparsers.add_parser('version', add_help=False) # Subparser for 'help'. parser_help = subparsers.add_parser('help', add_help=False) parser_help.add_argument('command', choices=['set', 'show', 'brightness', 'mirroring'], nargs='?', default=None) # Subparser for 'set'. parser_set = subparsers.add_parser('set', add_help=False) parser_set.add_argument('command', choices=['help', 'closest', 'highest', 'exact'], nargs='?', default='closest') # Subparser for 'show'. parser_show = subparsers.add_parser('show', add_help=False) parser_show.add_argument('command', choices=['help', 'all', 'closest', 'highest', 'exact', 'displays'], nargs='?', default='all') # Subparser for 'brightness'. 
parser_brightness = subparsers.add_parser('brightness', add_help=False) parser_brightness.add_argument('command', choices=['help', 'show', 'set']) parser_brightness.add_argument('brightness', type=float, nargs='?') # Subparser for 'mirroring'. parser_mirroring = subparsers.add_parser('mirroring', add_help=False) parser_mirroring.add_argument('command', choices=['help', 'enable', 'disable']) parser_mirroring.add_argument('--mirror-of-display', type=int, default=Quartz.CGMainDisplayID()) # All of the subcommands have some similar arguments. for subparser in [parser_set, parser_show, parser_brightness, parser_mirroring]: subparser.add_argument('--help', action='store_true') subparser.add_argument('--display', type=int) # These two subparsers have similar arguments. for subparser in [parser_set, parser_show]: subparser.add_argument('-w', '--width', type=int) subparser.add_argument('-h', '--height', type=int) subparser.add_argument('-d', '--depth', type=int) subparser.add_argument('-r', '--refresh', type=int) subparser.add_argument('--no-hidpi', action='store_true') subparser.add_argument('--only-hidpi', action='store_true') # Parse the arguments. # Note that we have to use the leftover arguments from the # parser.parse_known_args() call up above. args = parser.parse_args(args[1]) # If they used the 'help' subcommand, use it smartly. if args.subcommand == 'help': usage(command=args.command) sys.exit(0) if args.subcommand == 'version': print(version()) sys.exit(0) # Check if they wanted help with the subcommand. if args.command == 'help' or args.help: usage(command=args.subcommand) sys.exit(0) # Check we have either all or none of the manual specifications. try: manual = [args.width, args.height, args.depth, args.refresh] if any(manual): if args.subcommand not in ['set', 'show']: usage() print("Error: Cannot supply manual specifications for subcommand '{}'.".format(subcommand)) sys.exit(1) for element in manual: if element is None: usage() print("Error: Must have either all or none of the manual specifications.") sys.exit(1) except AttributeError: # Evidently we're using a subparser without these attributes. # Not an issue. pass # Check if we have specified both not to use HiDPI and only to use HiDPI. try: hidpi = get_hidpi_value(args.no_hidpi, args.only_hidpi) except AttributeError: # Probably using a subparser that doesn't check HiDPI settings. # And that's okay. pass # print(args) # sys.exit(0) if args.subcommand == 'set': sub_set(args.command, args.width, args.height, args.depth, args.refresh, args.display, hidpi) elif args.subcommand == 'show': sub_show(args.command, args.width, args.height, args.depth, args.refresh, args.display, hidpi) elif args.subcommand == 'brightness': sub_brightness(args.command, args.brightness, args.display) elif args.subcommand == 'mirroring': sub_mirroring(args.command, args.display, args.mirror_of_display)
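# ---------------------------------------------------------------------------
# Programmatic usage sketch (illustrative; assumes macOS with the PyObjC
# Quartz bindings used throughout this script). It reuses
# get_all_current_configurations() defined above to print each online display
# and its current DisplayMode, without going through the CLI subcommands.
def _print_current_configurations():
    main_display = Quartz.CGMainDisplayID()
    for display, mode in get_all_current_configurations():
        tag = " (Main Display)" if display == main_display else ""
        print("Display: {}{}".format(display, tag))
        print("    {}".format(mode))

# Example (run on macOS):
#   _print_current_configurations()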
#!/usr/bin/env python # # Copyright (c) 2017 Alex Wang (@DarkAlexWang) # released under The MIT license (MIT) http://opensource.org/licenses/MIT # """ gitfiti noun : Carefully crafted graffiti in a GitHub commit history calendar """ from datetime import datetime, timedelta import itertools import json import math try: # Python 3+ from urllib.error import HTTPError, URLError from urllib.request import urlopen except ImportError: # Python 2 from urllib2 import HTTPError, URLError, urlopen try: # Python 2 raw_input except NameError: # Python 3 (Python 2's `raw_input` was renamed to `input`) raw_input = input GITHUB_BASE_URL = 'https://github.com/' FALLBACK_IMAGE = 'kitty' TITLE = ''' _ __ _____ __ _ ____ _(_) /_/ __(_) /_(_) / __ `/ / __/ /_/ / __/ / / /_/ / / /_/ __/ / /_/ / \__, /_/\__/_/ /_/\__/_/ /____/ ''' KITTY = [ [0,0,0,4,0,0,0,0,4,0,0,0], [0,0,4,2,4,4,4,4,2,4,0,0], [0,0,4,2,2,2,2,2,2,4,0,0], [2,2,4,2,4,2,2,4,2,4,2,2], [0,0,4,2,2,3,3,2,2,4,0,0], [2,2,4,2,2,2,2,2,2,4,2,2], [0,0,0,3,4,4,4,4,3,0,0,0], ] ONEUP = [ [0,4,4,4,4,4,4,4,0], [4,3,2,2,1,2,2,3,4], [4,2,2,1,1,1,2,2,4], [4,3,4,4,4,4,4,3,4], [4,4,1,4,1,4,1,4,4], [0,4,1,1,1,1,1,4,0], [0,0,4,4,4,4,4,0,0], ] ONEUP2 = [ [0,0,4,4,4,4,4,4,4,0,0], [0,4,2,2,1,1,1,2,2,4,0], [4,3,2,2,1,1,1,2,2,3,4], [4,3,3,4,4,4,4,4,3,3,4], [0,4,4,1,4,1,4,1,4,4,0], [0,0,4,1,1,1,1,1,4,0,0], [0,0,0,4,4,4,4,4,0,0,0], ] HACKERSCHOOL = [ [4,4,4,4,4,4], [4,3,3,3,3,4], [4,1,3,3,1,4], [4,3,3,3,3,4], [4,4,4,4,4,4], [0,0,4,4,0,0], [4,4,4,4,4,4], ] OCTOCAT = [ [0,0,0,4,0,0,0,4,0], [0,0,4,4,4,4,4,4,4], [0,0,4,1,3,3,3,1,4], [4,0,3,4,3,3,3,4,3], [0,4,0,0,4,4,4,0,0], [0,0,4,4,4,4,4,4,4], [0,0,4,0,4,0,4,0,4], ] OCTOCAT2 = [ [0,0,4,0,0,4,0], [0,4,4,4,4,4,4], [0,4,1,3,3,1,4], [0,4,4,4,4,4,4], [4,0,0,4,4,0,0], [0,4,4,4,4,4,0], [0,0,0,4,4,4,0], ] HELLO = [ [0,1,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,4], [0,2,0,0,0,0,0,0,0,2,0,2,0,0,0,0,0,4], [0,3,3,3,0,2,3,3,0,3,0,3,0,1,3,1,0,3], [0,4,0,4,0,4,0,4,0,4,0,4,0,4,0,4,0,3], [0,3,0,3,0,3,3,3,0,3,0,3,0,3,0,3,0,2], [0,2,0,2,0,2,0,0,0,2,0,2,0,2,0,2,0,0], [0,1,0,1,0,1,1,1,0,1,0,1,0,1,1,1,0,4], ] HIREME = [ [1,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], [2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], [3,3,3,0,2,0,3,3,3,0,2,3,3,0,0,3,3,0,3,0,0,2,3,3], [4,0,4,0,4,0,4,0,0,0,4,0,4,0,0,4,0,4,0,4,0,4,0,4], [3,0,3,0,3,0,3,0,0,0,3,3,3,0,0,3,0,3,0,3,0,3,3,3], [2,0,2,0,2,0,2,0,0,0,2,0,0,0,0,2,0,2,0,2,0,2,0,0], [1,0,1,0,1,0,1,0,0,0,1,1,1,0,0,1,0,1,0,1,0,1,1,1], ] ASCII_TO_NUMBER = { '_': 0, '_': 1, '~': 2, '=': 3, '*': 4, } def str_to_sprite(content): # Break out lines and filter any excess lines = content.split('\n') def is_empty_line(line): return len(line) != 0 lines = filter(is_empty_line, lines) # Break up lines into each character split_lines = [list(line) for line in lines] # Replace each character with its numeric equivalent for line in split_lines: for index, char in enumerate(line): line[index] = ASCII_TO_NUMBER.get(char, 0) # Return the formatted str return split_lines ONEUP_STR = str_to_sprite(''' ******* *=~~-~~=* *~~---~~* *=*****=* **-*-*-** *-----* ***** ''') IMAGES = { 'kitty': KITTY, 'oneup': ONEUP, 'oneup2': ONEUP2, 'hackerschool': HACKERSCHOOL, 'octocat': OCTOCAT, 'octocat2': OCTOCAT2, 'hello': HELLO, 'hireme': HIREME, 'oneup_str': ONEUP_STR, } def load_images(img_names): """loads user images from given file(s)""" if img_names[0] == '': return {} for image_name in img_names: img = open(image_name) loaded_imgs = {} img_list = '' img_line = ' ' name = img.readline().replace('\n', '') name = name[1:] while True: img_line = img.readline() 
if img_line == '': break img_line.replace('\n', '') if img_line[0] == ':': loaded_imgs[name] = json.loads(img_list) name = img_line[1:] img_list = '' else: img_list += img_line loaded_imgs[name] = json.loads(img_list) return loaded_imgs def retrieve_contributions_calendar(username, base_url): """retrieves the GitHub commit calendar data for a username""" base_url = base_url + 'users/' + username try: url = base_url + '/contributions' page = urlopen(url) except (HTTPError, URLError) as e: print('There was a problem fetching data from {0}'.format(url)) print(e) raise SystemExit return page.read().decode('utf-8') def parse_contributions_calendar(contributions_calendar): """Yield daily counts extracted from the contributions SVG.""" for line in contributions_calendar.splitlines(): for day in line.split(): if 'data-count=' in day: commit = day.split('=')[1] commit = commit.strip('"') yield int(commit) def find_max_daily_commits(contributions_calendar): """finds the highest number of commits in one day""" daily_counts = parse_contributions_calendar(contributions_calendar) return max(daily_counts) def calculate_multiplier(max_commits): """calculates a multiplier to scale GitHub colors to commit history""" m = max_commits / 4.0 if m == 0: return 1 m = math.ceil(m) m = int(m) return m def get_start_date(): """returns a datetime object for the first sunday after one year ago today at 12:00 noon""" today = datetime.today() date = datetime(today.year - 2, today.month, today.day, 12) weekday = datetime.weekday(date) while weekday < 6: date = date + timedelta(1) weekday = datetime.weekday(date) return date def generate_next_dates(start_date, offset=0): """generator that returns the next date, requires a datetime object as input. The offset is in weeks""" start = offset * 7 for i in itertools.count(start): yield start_date + timedelta(i) def generate_values_in_date_order(image, multiplier=1): height = 7 width = len(image[0]) for w in range(width): for h in range(height): yield image[h][w] * multiplier def commit(commitdate): template = ( '''GIT_AUTHOR_DATE={0} GIT_COMMITTER_DATE={1} ''' '''git commit --allow-empty -m "gitfiti" > /dev/null\n''' ) return template.format(commitdate.isoformat(), commitdate.isoformat()) def fake_it(image, start_date, username, repo, git_url, offset=0, multiplier=1): template = ( '#!/bin/bash\n' 'REPO={0}\n' 'git init $REPO\n' 'cd $REPO\n' 'touch README.md\n' 'git add README.md\n' 'touch gitfiti\n' 'git add gitfiti\n' '{1}\n' 'git remote add origin {2}:{3}/$REPO.git\n' 'git pull origin master\n' 'git push -u origin master\n' ) strings = [] for value, date in zip(generate_values_in_date_order(image, multiplier), generate_next_dates(start_date, offset)): for _ in range(value): strings.append(commit(date)) return template.format(repo, ''.join(strings), git_url, username) def save(output, filename): """Saves the list to a given filename""" with open(filename, 'w') as f: f.write(output) def request_user_input(prompt='> '): """Request input from the user and return what has been entered.""" return raw_input(prompt) def main(): print(TITLE) ghe = request_user_input( 'Enter GitHub URL (leave blank to use {}): '.format(GITHUB_BASE_URL)) username = request_user_input('Enter your GitHub username: ') git_base = ghe if ghe else GITHUB_BASE_URL contributions_calendar = retrieve_contributions_calendar(username, git_base) max_daily_commits = find_max_daily_commits(contributions_calendar) m = calculate_multiplier(max_daily_commits) repo = request_user_input( 'Enter the name of the repository 
to use by gitfiti: ') offset = request_user_input( 'Enter the number of weeks to offset the image (from the left): ') offset = int(offset) if offset.strip() else 0 print(( 'By default gitfiti.py matches the darkest pixel to the highest\n' 'number of commits found in your GitHub commit/activity calendar,\n' '\n' 'Currently this is: {0} commits\n' '\n' 'Enter the word "gitfiti" to exceed your max\n' '(this option generates WAY more commits)\n' 'Any other input will cause the default matching behavior' ).format(max_daily_commits)) match = request_user_input() match = m if (match == 'gitfiti') else 1 print('Enter file(s) to load images from (blank if not applicable)') img_names = request_user_input().split(' ') loaded_images = load_images(img_names) images = dict(IMAGES, **loaded_images) print('Enter the image name to gitfiti') print('Images: ' + ', '.join(images.keys())) image = request_user_input() image_name_fallback = FALLBACK_IMAGE if not image: image = IMAGES[image_name_fallback] else: try: image = images[image] except: image = IMAGES[image_name_fallback] start_date = get_start_date() fake_it_multiplier = m * match if not ghe: git_url = 'git@github.com' else: git_url = request_user_input('Enter Git URL like git@site.github.com: ') output = fake_it(image, start_date, username, repo, git_url, offset, fake_it_multiplier) save(output, 'gitfiti.sh') print('gitfiti.sh saved.') print('Create a new(!) repo at {0}new and run the script'.format(git_base)) if __name__ == '__main__': main()
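# A minimal illustrative sketch (not part of the original script) of how the
# helpers above cooperate: sprite values are read column-by-column, one column
# per week and one row per weekday, and each value becomes the number of empty
# commits generated for that calendar day after scaling by the multiplier.
def _demo_value_date_pairs(image=KITTY, multiplier=1, offset=0):
    """Print the (date, commit count) pairs gitfiti would generate for a sprite."""
    for value, day in zip(generate_values_in_date_order(image, multiplier),
                          generate_next_dates(get_start_date(), offset)):
        print('{0}: {1} commits'.format(day.date(), value))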
# -*- coding: utf-8 -*- """ 3D Least-Squares Inversion of DC and IP Data ============================================ Here we invert 5 lines of DC and IP data to recover both an electrical conductivity and a chargeability model. We formulate the corresponding inverse problems as least-squares optimization problems. For this tutorial, we focus on the following: - Generating a mesh based on survey geometry - Including surface topography - Defining the inverse problem (data misfit, regularization, directives) - Applying sensitivity weighting - Plotting the recovered model and data misfit The DC data are measured voltages normalized by the source current in V/A and the IP data are defined as apparent chargeabilities and V/V. """ ################################################################# # Import Modules # -------------- # import os import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import tarfile from discretize import TreeMesh from discretize.utils import mkvc, refine_tree_xyz from SimPEG.utils import surface2ind_topo, model_builder from SimPEG.utils.io_utils.io_utils_electromagnetics import read_dcip_xyz from SimPEG import ( maps, data, data_misfit, regularization, optimization, inverse_problem, inversion, directives, utils, ) from SimPEG.electromagnetics.static import resistivity as dc from SimPEG.electromagnetics.static import induced_polarization as ip from SimPEG.electromagnetics.static.utils.static_utils import ( apparent_resistivity_from_voltage, ) # To plot DC/IP data in 3D, the user must have the plotly package try: import plotly from SimPEG.electromagnetics.static.utils.static_utils import plot_3d_pseudosection has_plotly = True except: has_plotly = False pass try: from pymatsolver import Pardiso as Solver except ImportError: from SimPEG import SolverLU as Solver mpl.rcParams.update({"font.size": 16}) # sphinx_gallery_thumbnail_number = 7 ########################################################## # Define File Names # ----------------- # # Here we provide the file paths to assets we need to run the inversion. The # path to the true model conductivity and chargeability models are also # provided for comparison with the inversion results. These files are stored as a # tar-file on our google cloud bucket: # "https://storage.googleapis.com/simpeg/doc-assets/dcip3d.tar.gz" # # # # storage bucket where we have the data data_source = "https://storage.googleapis.com/simpeg/doc-assets/dcip3d.tar.gz" # download the data downloaded_data = utils.download(data_source, overwrite=True) # unzip the tarfile tar = tarfile.open(downloaded_data, "r") tar.extractall() tar.close() # path to the directory containing our data dir_path = downloaded_data.split(".")[0] + os.path.sep # files to work with topo_filename = dir_path + "topo_xyz.txt" dc_data_filename = dir_path + "dc_data.xyz" ip_data_filename = dir_path + "ip_data.xyz" ######################################################## # Load Data and Topography # ------------------------ # # Here we load the observed data and topography. 
# # topo_xyz = np.loadtxt(str(topo_filename)) dc_data = read_dcip_xyz( dc_data_filename, "volt", data_header="V/A", uncertainties_header="UNCERT", is_surface_data=False, ) ip_data = read_dcip_xyz( ip_data_filename, "apparent_chargeability", data_header="APP_CHG", uncertainties_header="UNCERT", is_surface_data=False, ) ########################################################## # Plot Observed DC Data in Pseudosection # -------------------------------------- # # Here we plot the observed DC data in 3D pseudosection. # To use this utility, you must have Python's *plotly* package. # Here, we represent the DC data as apparent conductivities. # # Convert predicted data to apparent conductivities apparent_conductivity = 1 / apparent_resistivity_from_voltage( dc_data.survey, dc_data.dobs, ) if has_plotly: # Plot DC Data fig = plot_3d_pseudosection( dc_data.survey, apparent_conductivity, scale="log", units="S/m" ) fig.update_layout( title_text="Apparent Conductivity", title_x=0.5, title_font_size=24, width=650, height=500, scene_camera=dict( center=dict(x=0, y=0, z=-0.4), eye=dict(x=1.5, y=-1.5, z=1.8) ), ) plotly.io.show(fig) else: print("INSTALL 'PLOTLY' TO VISUALIZE 3D PSEUDOSECTIONS") ########################################################## # Plot Observed IP Data in Pseudosection # -------------------------------------- # # Here we plot the observed IP data in 3D pseudosection. # To use this utility, you must have Python's *plotly* package. # Here, we represent the IP data as apparent chargeabilities. # if has_plotly: # Plot IP Data fig = plot_3d_pseudosection( ip_data.survey, ip_data.dobs, scale="linear", units="V/V", vlim=[0, np.max(ip_data.dobs)], marker_opts={"colorscale": "plasma"}, ) fig.update_layout( title_text="Apparent Chargeability", title_x=0.5, title_font_size=24, width=650, height=500, scene_camera=dict( center=dict(x=0, y=0, z=-0.4), eye=dict(x=1.5, y=-1.5, z=1.8) ), ) plotly.io.show(fig) else: print("INSTALL 'PLOTLY' TO VISUALIZE 3D PSEUDOSECTIONS") #################################################### # Assign Uncertainties # -------------------- # # Inversion with SimPEG requires that we define the uncertainties on our data. # This represents our estimate of the standard deviation of the # noise in our data. For DC data, the uncertainties are 10% of the absolute value. # For IP data, the uncertainties are 5e-3 V/V. # # dc_data.standard_deviation = 0.1 * np.abs(dc_data.dobs) ip_data.standard_deviation = 5e-3 * np.ones_like(ip_data.dobs) ################################################################ # Create Tree Mesh # ---------------- # # Here, we create the Tree mesh that will be used to invert both DC # resistivity and IP data. # dh = 25.0 # base cell width dom_width_x = 6000.0 # domain width x dom_width_y = 6000.0 # domain width y dom_width_z = 4000.0 # domain width z nbcx = 2 ** int(np.round(np.log(dom_width_x / dh) / np.log(2.0))) # num. base cells x nbcy = 2 ** int(np.round(np.log(dom_width_y / dh) / np.log(2.0))) # num. base cells y nbcz = 2 ** int(np.round(np.log(dom_width_z / dh) / np.log(2.0))) # num. base cells z # Define the base mesh hx = [(dh, nbcx)] hy = [(dh, nbcy)] hz = [(dh, nbcz)] mesh = TreeMesh([hx, hy, hz], x0="CCN") # Mesh refinement based on topography k = np.sqrt(np.sum(topo_xyz[:, 0:2] ** 2, axis=1)) < 1200 mesh = refine_tree_xyz( mesh, topo_xyz[k, :], octree_levels=[0, 6, 8], method="surface", finalize=False ) # Mesh refinement near sources and receivers. 
electrode_locations = np.r_[ dc_data.survey.locations_a, dc_data.survey.locations_b, dc_data.survey.locations_m, dc_data.survey.locations_n, ] unique_locations = np.unique(electrode_locations, axis=0) mesh = refine_tree_xyz( mesh, unique_locations, octree_levels=[4, 6, 4], method="radial", finalize=False ) # Finalize the mesh mesh.finalize() ####################################################### # Project Electrodes to Discretized Topography # -------------------------------------------- # # It is important that electrodes are not modeled as being in the air. Even if the # electrodes are properly located along surface topography, they may lie above # the discretized topography. This step is carried out to ensure all electrodes # lie on the discretized surface. # # Find cells that lie below surface topography ind_active = surface2ind_topo(mesh, topo_xyz) # Extract survey from data object dc_survey = dc_data.survey ip_survey = ip_data.survey # Shift electrodes to the surface of discretized topography dc_survey.drape_electrodes_on_topography(mesh, ind_active, option="top") ip_survey.drape_electrodes_on_topography(mesh, ind_active, option="top") # Reset survey in data object dc_data.survey = dc_survey ip_data.survey = ip_survey ################################################################# # Starting/Reference Model and Mapping on OcTree Mesh # --------------------------------------------------- # # Here, we create starting and/or reference models for the DC inversion as # well as the mapping from the model space to the active cells. Starting and # reference models can be a constant background value or contain a-priori # structures. Here, the starting model is the natural log of 0.01 S/m. # # # Define conductivity model in S/m (or resistivity model in Ohm m) air_conductivity = np.log(1e-8) background_conductivity = np.log(1e-2) # Define the mapping from active cells to the entire domain active_map = maps.InjectActiveCells(mesh, ind_active, np.exp(air_conductivity)) nC = int(ind_active.sum()) # Define the mapping from the model to the conductivity of the entire domain conductivity_map = active_map * maps.ExpMap() # Define starting model starting_conductivity_model = background_conductivity * np.ones(nC) ############################################################### # Define the Physics of the DC Simulation # --------------------------------------- # # Here, we define the physics of the DC resistivity simulation. # # dc_simulation = dc.simulation.Simulation3DNodal( mesh, survey=dc_survey, sigmaMap=conductivity_map, solver=Solver, storeJ=True ) ################################################################# # Define DC Inverse Problem # ------------------------- # # The inverse problem is defined by 3 things: # # 1) Data Misfit: a measure of how well our recovered model explains the field data # 2) Regularization: constraints placed on the recovered model and a priori information # 3) Optimization: the numerical approach used to solve the inverse problem # # # Define the data misfit. Here the data misfit is the L2 norm of the weighted # residual between the observed data and the data predicted for a given model. # Within the data misfit, the residual between predicted and observed data are # normalized by the data's standard deviation. 
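# As a rough sketch (a simplified view, not necessarily SimPEG's exact
# normalization), the weighted L2 data misfit described above boils down to
# summing squared residuals scaled by the assigned standard deviations:
#
#     phi_d = np.sum(((d_pred - dc_data.dobs) / dc_data.standard_deviation) ** 2)
#
# The L2DataMisfit object defined next evaluates this quantity (up to a
# constant factor) using the simulation's predicted data d_pred.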
dc_data_misfit = data_misfit.L2DataMisfit(data=dc_data, simulation=dc_simulation) # Define the regularization (model objective function) dc_regularization = regularization.Simple( mesh, indActive=ind_active, mref=starting_conductivity_model, ) dc_regularization.mrefInSmooth = True # Include reference model in smoothness # Define how the optimization problem is solved. dc_optimization = optimization.InexactGaussNewton(maxIter=15, maxIterCG=30, tolCG=1e-2) # Here we define the inverse problem that is to be solved dc_inverse_problem = inverse_problem.BaseInvProblem( dc_data_misfit, dc_regularization, dc_optimization ) ################################################# # Define DC Inversion Directives # ------------------------------ # # Here we define any directives that are carried out during the inversion. This # includes the cooling schedule for the trade-off parameter (beta), stopping # criteria for the inversion and saving inversion results at each iteration. # # # Apply and update sensitivity weighting as the model updates update_sensitivity_weighting = directives.UpdateSensitivityWeights() # Defining a starting value for the trade-off parameter (beta) between the data # misfit and the regularization. starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e1) # Set the rate of reduction in trade-off parameter (beta) each time the # the inverse problem is solved. And set the number of Gauss-Newton iterations # for each trade-off paramter value. beta_schedule = directives.BetaSchedule(coolingFactor=2.5, coolingRate=2) # Options for outputting recovered models and predicted data for each beta. save_iteration = directives.SaveOutputEveryIteration(save_txt=False) # Setting a stopping criteria for the inversion. target_misfit = directives.TargetMisfit(chifact=1) # Apply and update preconditioner as the model updates update_jacobi = directives.UpdatePreconditioner() directives_list = [ update_sensitivity_weighting, starting_beta, beta_schedule, save_iteration, target_misfit, update_jacobi, ] ######################################################### # Running the DC Inversion # ------------------------ # # To define the inversion object, we need to define the inversion problem and # the set of directives. We can then run the inversion. 
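# A brief recap of the directives above (described loosely, not as exact SimPEG
# internals): BetaSchedule(coolingFactor=2.5, coolingRate=2) divides the
# trade-off parameter beta by 2.5 after every 2 Gauss-Newton iterations,
#
#     beta_0  ->  beta_0 / 2.5  ->  beta_0 / 6.25  ->  ...
#
# while TargetMisfit(chifact=1) halts the inversion once the data misfit falls
# to the target value implied by the number of data and their uncertainties.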
# # Here we combine the inverse problem and the set of directives dc_inversion = inversion.BaseInversion( dc_inverse_problem, directiveList=directives_list ) # Run inversion recovered_conductivity_model = dc_inversion.run(starting_conductivity_model) ############################################################### # Recreate True Conductivity Model # -------------------------------- # background_value = 1e-2 conductor_value = 1e-1 resistor_value = 1e-3 true_conductivity_model = background_value * np.ones(nC) ind_conductor = model_builder.getIndicesSphere( np.r_[-350.0, 0.0, -300.0], 160.0, mesh.cell_centers[ind_active, :] ) true_conductivity_model[ind_conductor] = conductor_value ind_resistor = model_builder.getIndicesSphere( np.r_[350.0, 0.0, -300.0], 160.0, mesh.cell_centers[ind_active, :] ) true_conductivity_model[ind_resistor] = resistor_value true_conductivity_model_log10 = np.log10(true_conductivity_model) ############################################################### # Plotting True and Recovered Conductivity Model # ---------------------------------------------- # # Plot True Model fig = plt.figure(figsize=(10, 4)) plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan) ax1 = fig.add_axes([0.15, 0.15, 0.67, 0.75]) mesh.plotSlice( plotting_map * true_conductivity_model_log10, ax=ax1, normal="Y", ind=int(len(mesh.hy) / 2), grid=False, clim=(true_conductivity_model_log10.min(), true_conductivity_model_log10.max()), pcolor_opts={"cmap": mpl.cm.viridis}, ) ax1.set_title("True Conductivity Model") ax1.set_xlabel("x (m)") ax1.set_ylabel("z (m)") ax1.set_xlim([-1000, 1000]) ax1.set_ylim([-1000, 0]) ax2 = fig.add_axes([0.84, 0.15, 0.03, 0.75]) norm = mpl.colors.Normalize( vmin=true_conductivity_model_log10.min(), vmax=true_conductivity_model_log10.max() ) cbar = mpl.colorbar.ColorbarBase( ax2, cmap=mpl.cm.viridis, norm=norm, orientation="vertical", format="$10^{%.1f}$" ) cbar.set_label("Conductivity [S/m]", rotation=270, labelpad=15, size=12) # Plot recovered model recovered_conductivity_model_log10 = np.log10(np.exp(recovered_conductivity_model)) fig = plt.figure(figsize=(10, 4)) ax1 = fig.add_axes([0.15, 0.15, 0.67, 0.75]) mesh.plotSlice( plotting_map * recovered_conductivity_model_log10, ax=ax1, normal="Y", ind=int(len(mesh.hy) / 2), grid=False, clim=(true_conductivity_model_log10.min(), true_conductivity_model_log10.max()), pcolor_opts={"cmap": mpl.cm.viridis}, ) ax1.set_title("Recovered Conductivity Model") ax1.set_xlabel("x (m)") ax1.set_ylabel("z (m)") ax1.set_xlim([-1000, 1000]) ax1.set_ylim([-1000, 0]) ax2 = fig.add_axes([0.84, 0.15, 0.03, 0.75]) norm = mpl.colors.Normalize( vmin=true_conductivity_model_log10.min(), vmax=true_conductivity_model_log10.max() ) cbar = mpl.colorbar.ColorbarBase( ax2, cmap=mpl.cm.viridis, norm=norm, orientation="vertical", format="$10^{%.1f}$" ) cbar.set_label("Conductivity [S/m]", rotation=270, labelpad=15, size=12) plt.show() ####################################################################### # Plotting Normalized Data Misfit or Predicted DC Data # ---------------------------------------------------- # # To see how well the recovered model reproduces the observed data, # it is a good idea to compare the predicted and observed data. # Here, we accomplish this by plotting the normalized misfit. 
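# As an interpretive note (not from the original tutorial): the normalized
# misfit computed below is (observed - predicted) / standard_deviation, so
# values within roughly +/- 2 indicate data fit to within about two standard
# deviations, consistent with the vlim=[-2, 2] used in the plot.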
# # Predicted data from recovered model dpred_dc = dc_inverse_problem.dpred # Compute the normalized data misfit dc_normalized_misfit = (dc_data.dobs - dpred_dc) / dc_data.standard_deviation if has_plotly: # Plot IP Data fig = plot_3d_pseudosection( dc_data.survey, dc_normalized_misfit, scale="linear", units="", vlim=[-2, 2], plane_distance=15, ) fig.update_layout( title_text="Normalized Data Misfit", title_x=0.5, title_font_size=24, width=650, height=500, scene_camera=dict( center=dict(x=0, y=0, z=-0.4), eye=dict(x=1.5, y=-1.5, z=1.8) ), ) plotly.io.show(fig) else: print("INSTALL 'PLOTLY' TO VISUALIZE 3D PSEUDOSECTIONS") ################################################################ # Starting/Reference Model for IP Inversion # ----------------------------------------- # # Here, we would create starting and/or reference models for the IP inversion as # well as the mapping from the model space to the active cells. Starting and # reference models can be a constant background value or contain a-priori # structures. Here, the starting model is the 1e-6 V/V. # # # Define chargeability model in V/V air_chargeability = 0.0 background_chargeability = 1e-6 active_map = maps.InjectActiveCells(mesh, ind_active, air_chargeability) nC = int(ind_active.sum()) chargeability_map = active_map # Define starting model starting_chargeability_model = background_chargeability * np.ones(nC) ######################################################### # Define the Physics of the IP Simulation # --------------------------------------- # # Here, we define the physics of the IP problem. For the chargeability, we # require a mapping from the model space to the entire mesh. For the background # conductivity/resistivity, we require the conductivity/resistivity on the # entire mesh. # # ip_simulation = ip.simulation.Simulation3DNodal( mesh, survey=ip_survey, etaMap=chargeability_map, sigma=conductivity_map * recovered_conductivity_model, solver=Solver, storeJ=True, ) ################################################# # Define IP Inverse Problem # ------------------------- # # Here we define the inverse problem in the same manner as the DC inverse problem. # # Define the data misfit (Here we use weighted L2-norm) ip_data_misfit = data_misfit.L2DataMisfit(data=ip_data, simulation=ip_simulation) # Define the regularization (model objective function) ip_regularization = regularization.Simple( mesh, indActive=ind_active, mapping=maps.IdentityMap(nP=nC), alpha_s=0.01, alpha_x=1, alpha_y=1, alpha_z=1, ) # Define how the optimization problem is solved. ip_optimization = optimization.ProjectedGNCG( maxIter=15, lower=0.0, upper=10, maxIterCG=30, tolCG=1e-2 ) # Here we define the inverse problem that is to be solved ip_inverse_problem = inverse_problem.BaseInvProblem( ip_data_misfit, ip_regularization, ip_optimization ) ####################################################### # Define IP Inversion Directives # ------------------------------ # # Here we define the directives in the same manner as the DC inverse problem. 
# update_sensitivity_weighting = directives.UpdateSensitivityWeights(threshold=1e-3) starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e2) beta_schedule = directives.BetaSchedule(coolingFactor=2.5, coolingRate=1) save_iteration = directives.SaveOutputEveryIteration(save_txt=False) target_misfit = directives.TargetMisfit(chifact=1.0) update_jacobi = directives.UpdatePreconditioner() directives_list = [ update_sensitivity_weighting, starting_beta, beta_schedule, save_iteration, target_misfit, update_jacobi, ] ############################################## # Running the IP Inversion # ------------------------ # # Here we combine the inverse problem and the set of directives ip_inversion = inversion.BaseInversion( ip_inverse_problem, directiveList=directives_list ) # Run inversion recovered_chargeability_model = ip_inversion.run(starting_chargeability_model) ################################################################ # Recreate True Chargeability Model # --------------------------------- # background_value = 1e-6 chargeable_value = 1e-1 true_chargeability_model = background_value * np.ones(nC) ind_chargeable = model_builder.getIndicesSphere( np.r_[-350.0, 0.0, -300.0], 160.0, mesh.cell_centers[ind_active, :] ) true_chargeability_model[ind_chargeable] = chargeable_value ################################################################ # Plot True and Recovered Chargeability Model # -------------------------------------------- # # Plot True Model fig = plt.figure(figsize=(10, 4)) plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan) ax1 = fig.add_axes([0.15, 0.15, 0.67, 0.75]) mesh.plotSlice( plotting_map * true_chargeability_model, ax=ax1, normal="Y", ind=int(len(mesh.hy) / 2), grid=False, clim=(true_chargeability_model.min(), true_chargeability_model.max()), pcolor_opts={"cmap": mpl.cm.plasma}, ) ax1.set_title("True Chargeability Model") ax1.set_xlabel("x (m)") ax1.set_ylabel("z (m)") ax1.set_xlim([-1000, 1000]) ax1.set_ylim([-1000, 0]) ax2 = fig.add_axes([0.84, 0.15, 0.03, 0.75]) norm = mpl.colors.Normalize( vmin=true_chargeability_model.min(), vmax=true_chargeability_model.max() ) cbar = mpl.colorbar.ColorbarBase( ax2, cmap=mpl.cm.plasma, norm=norm, orientation="vertical", format="%.2f" ) cbar.set_label("Intrinsic Chargeability [V/V]", rotation=270, labelpad=15, size=12) # Plot Recovered Model fig = plt.figure(figsize=(10, 4)) ax1 = fig.add_axes([0.15, 0.15, 0.67, 0.75]) mesh.plotSlice( plotting_map * recovered_chargeability_model, ax=ax1, normal="Y", ind=int(len(mesh.hy) / 2), grid=False, clim=(true_chargeability_model.min(), true_chargeability_model.max()), pcolor_opts={"cmap": mpl.cm.plasma}, ) ax1.set_title("Recovered Chargeability Model") ax1.set_xlabel("x (m)") ax1.set_ylabel("z (m)") ax1.set_xlim([-1000, 1000]) ax1.set_ylim([-1000, 0]) ax2 = fig.add_axes([0.84, 0.15, 0.03, 0.75]) norm = mpl.colors.Normalize( vmin=true_chargeability_model.min(), vmax=true_chargeability_model.max() ) cbar = mpl.colorbar.ColorbarBase( ax2, cmap=mpl.cm.plasma, norm=norm, orientation="vertical", format="%.2f" ) cbar.set_label("Intrinsic Chargeability [V/V]", rotation=270, labelpad=15, size=12) plt.show() ########################################################## # Plotting Normalized Data Misfit or Predicted IP Data # ---------------------------------------------------- # # Predicted data from recovered model dpred_ip = ip_inverse_problem.dpred # Normalized misfit ip_normalized_misfit = (ip_data.dobs - dpred_ip) / ip_data.standard_deviation if has_plotly: fig = plot_3d_pseudosection( 
ip_data.survey, ip_normalized_misfit, scale="linear", units="", vlim=[-2, 2], plane_distance=15, marker_opts={"colorscale": "plasma"}, ) fig.update_layout( title_text="Normalized Data Misfit", title_x=0.5, title_font_size=24, width=650, height=500, scene_camera=dict( center=dict(x=0, y=0, z=-0.4), eye=dict(x=1.5, y=-1.5, z=1.8) ), ) plotly.io.show(fig) else: print("INSTALL 'PLOTLY' TO VISUALIZE 3D PSEUDOSECTIONS")
""" This class is defined to override standard pickle functionality The goals of it follow: -Serialize lambdas and nested functions to compiled byte code -Deal with main module correctly -Deal with other non-serializable objects It does not include an unpickler, as standard python unpickling suffices. This module was extracted from the `cloud` package, developed by `PiCloud, Inc. <http://www.picloud.com>`_, and as modified for Apache Spark (licensed under ASL 2.0). Copyright (c) 2012, Regents of the University of California. Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the University of California, Berkeley nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ # flake8: noqa from copy_reg import _extension_registry from functools import partial import dis import itertools import operator import os import pickle import struct import sys import traceback import types import logging cloudLog = logging.getLogger("Cloud.Transport") #relevant opcodes STORE_GLOBAL = chr(dis.opname.index('STORE_GLOBAL')) DELETE_GLOBAL = chr(dis.opname.index('DELETE_GLOBAL')) LOAD_GLOBAL = chr(dis.opname.index('LOAD_GLOBAL')) GLOBAL_OPS = [STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL] HAVE_ARGUMENT = chr(dis.HAVE_ARGUMENT) EXTENDED_ARG = chr(dis.EXTENDED_ARG) try: from cStringIO import StringIO except ImportError: from StringIO import StringIO # These helper functions were copied from PiCloud's util module. def islambda(func): return getattr(func,'func_name') == '<lambda>' def xrange_params(xrangeobj): """Returns a 3 element tuple describing the xrange start, step, and len respectively Note: Only guarentees that elements of xrange are the same. parameters may be different. e.g. xrange(1,1) is interpretted as xrange(0,0); both behave the same though w/ iteration """ xrange_len = len(xrangeobj) if not xrange_len: #empty return (0,1,0) start = xrangeobj[0] if xrange_len == 1: #one element return start, 1, 1 return (start, xrangeobj[1] - xrangeobj[0], xrange_len) #debug variables intended for developer use: printSerialization = False printMemoization = False useForcedImports = True #Should I use forced imports for tracking? 
class CloudPickler(pickle.Pickler): dispatch = pickle.Pickler.dispatch.copy() savedForceImports = False def __init__(self, file, protocol=None, min_size_to_save= 0): pickle.Pickler.__init__(self,file,protocol) self.modules = set() #set of modules needed to depickle self.globals_ref = {} # map ids to dictionary. used to ensure that functions can share global env def dump(self, obj): # note: not thread safe # minimal side-effects, so not fixing recurse_limit = 3000 base_recurse = sys.getrecursionlimit() if base_recurse < recurse_limit: sys.setrecursionlimit(recurse_limit) self.inject_addons() try: return pickle.Pickler.dump(self, obj) except RuntimeError, e: if 'recursion' in e.args[0]: msg = """Could not pickle object as excessively deep recursion required. Try _fast_serialization=2 or contact PiCloud support""" raise pickle.PicklingError(msg) finally: new_recurse = sys.getrecursionlimit() if new_recurse == recurse_limit: sys.setrecursionlimit(base_recurse) def save_buffer(self, obj): """Fallback to save_string""" pickle.Pickler.save_string(self,str(obj)) dispatch[buffer] = save_buffer #block broken objects def save_unsupported(self, obj, pack=None): raise pickle.PicklingError("Cannot pickle objects of type %s" % type(obj)) dispatch[types.GeneratorType] = save_unsupported #python2.6+ supports slice pickling. some py2.5 extensions might as well. We just test it try: slice(0,1).__reduce__() except TypeError: #can't pickle - dispatch[slice] = save_unsupported #itertools objects do not pickle! for v in itertools.__dict__.values(): if type(v) is type: dispatch[v] = save_unsupported def save_dict(self, obj): """hack fix If the dict is a global, deal with it in a special way """ #print 'saving', obj if obj is __builtins__: self.save_reduce(_get_module_builtins, (), obj=obj) else: pickle.Pickler.save_dict(self, obj) dispatch[pickle.DictionaryType] = save_dict def save_module(self, obj, pack=struct.pack): """ Save a module as an import """ #print 'try save import', obj.__name__ self.modules.add(obj) self.save_reduce(subimport,(obj.__name__,), obj=obj) dispatch[types.ModuleType] = save_module #new type def save_codeobject(self, obj, pack=struct.pack): """ Save a code object """ #print 'try to save codeobj: ', obj args = ( obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars ) self.save_reduce(types.CodeType, args, obj=obj) dispatch[types.CodeType] = save_codeobject #new type def save_function(self, obj, name=None, pack=struct.pack): """ Registered with the dispatch to handle all function types. Determines what kind of function obj is (e.g. lambda, defined at interactive prompt, etc) and handles the pickling appropriately. """ write = self.write name = obj.__name__ modname = pickle.whichmodule(obj, name) #print 'which gives %s %s %s' % (modname, obj, name) try: themodule = sys.modules[modname] except KeyError: # eval'd items such as namedtuple give invalid items for their function __module__ modname = '__main__' if modname == '__main__': themodule = None if themodule: self.modules.add(themodule) if getattr(themodule, name, None) is obj: return self.save_global(obj, name) # if func is lambda, def'ed at prompt, is in main, or is nested, then # we'll pickle the actual function object rather than simply saving a # reference (as is done in default pickler), via save_function_tuple. 
if islambda(obj) or obj.func_code.co_filename == '<stdin>' or themodule is None: #Force server to import modules that have been imported in main modList = None if themodule is None and not self.savedForceImports: mainmod = sys.modules['__main__'] if useForcedImports and hasattr(mainmod,'___pyc_forcedImports__'): modList = list(mainmod.___pyc_forcedImports__) self.savedForceImports = True self.save_function_tuple(obj, modList) return else: # func is nested klass = getattr(themodule, name, None) if klass is None or klass is not obj: self.save_function_tuple(obj, [themodule]) return if obj.__dict__: # essentially save_reduce, but workaround needed to avoid recursion self.save(_restore_attr) write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj) self.save(obj.__dict__) write(pickle.TUPLE + pickle.REDUCE) else: write(pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj) dispatch[types.FunctionType] = save_function def save_function_tuple(self, func, forced_imports): """ Pickles an actual func object. A func comprises: code, globals, defaults, closure, and dict. We extract and save these, injecting reducing functions at certain points to recreate the func object. Keep in mind that some of these pieces can contain a ref to the func itself. Thus, a naive save on these pieces could trigger an infinite loop of save's. To get around that, we first create a skeleton func object using just the code (this is safe, since this won't contain a ref to the func), and memoize it as soon as it's created. The other stuff can then be filled in later. """ save = self.save write = self.write # save the modules (if any) if forced_imports: write(pickle.MARK) save(_modules_to_main) #print 'forced imports are', forced_imports forced_names = map(lambda m: m.__name__, forced_imports) save((forced_names,)) #save((forced_imports,)) write(pickle.REDUCE) write(pickle.POP_MARK) code, f_globals, defaults, closure, dct, base_globals = self.extract_func_data(func) save(_fill_function) # skeleton function updater write(pickle.MARK) # beginning of tuple that _fill_function expects # create a skeleton function object and memoize it save(_make_skel_func) save((code, closure, base_globals)) write(pickle.REDUCE) self.memoize(func) # save the rest of the func data needed by _fill_function save(f_globals) save(defaults) save(dct) write(pickle.TUPLE) write(pickle.REDUCE) # applies _fill_function on the tuple @staticmethod def extract_code_globals(co): """ Find all globals names read or written to by codeblock co """ code = co.co_code names = co.co_names out_names = set() n = len(code) i = 0 extended_arg = 0 while i < n: op = code[i] i = i+1 if op >= HAVE_ARGUMENT: oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg extended_arg = 0 i = i+2 if op == EXTENDED_ARG: extended_arg = oparg*65536L if op in GLOBAL_OPS: out_names.add(names[oparg]) #print 'extracted', out_names, ' from ', names if co.co_consts: # see if nested function have any global refs for const in co.co_consts: if type(const) is types.CodeType: out_names |= CloudPickler.extract_code_globals(const) return out_names def extract_func_data(self, func): """ Turn the function into a tuple of data necessary to recreate it: code, globals, defaults, closure, dict """ code = func.func_code # extract all global ref's func_global_refs = CloudPickler.extract_code_globals(code) # process all variables referenced by global environment f_globals = {} for var in func_global_refs: #Some names, such as class functions are not global - we don't need 
them if func.func_globals.has_key(var): f_globals[var] = func.func_globals[var] # defaults requires no processing defaults = func.func_defaults def get_contents(cell): try: return cell.cell_contents except ValueError, e: #cell is empty error on not yet assigned raise pickle.PicklingError('Function to be pickled has free variables that are referenced before assignment in enclosing scope') # process closure if func.func_closure: closure = map(get_contents, func.func_closure) else: closure = [] # save the dict dct = func.func_dict if printSerialization: outvars = ['code: ' + str(code) ] outvars.append('globals: ' + str(f_globals)) outvars.append('defaults: ' + str(defaults)) outvars.append('closure: ' + str(closure)) print 'function ', func, 'is extracted to: ', ', '.join(outvars) base_globals = self.globals_ref.get(id(func.func_globals), {}) self.globals_ref[id(func.func_globals)] = base_globals return (code, f_globals, defaults, closure, dct, base_globals) def save_builtin_function(self, obj): if obj.__module__ is "__builtin__": return self.save_global(obj) return self.save_function(obj) dispatch[types.BuiltinFunctionType] = save_builtin_function def save_global(self, obj, name=None, pack=struct.pack): write = self.write memo = self.memo if name is None: name = obj.__name__ modname = getattr(obj, "__module__", None) if modname is None: modname = pickle.whichmodule(obj, name) try: __import__(modname) themodule = sys.modules[modname] except (ImportError, KeyError, AttributeError): #should never occur raise pickle.PicklingError( "Can't pickle %r: Module %s cannot be found" % (obj, modname)) if modname == '__main__': themodule = None if themodule: self.modules.add(themodule) sendRef = True typ = type(obj) #print 'saving', obj, typ try: try: #Deal with case when getattribute fails with exceptions klass = getattr(themodule, name) except (AttributeError): if modname == '__builtin__': #new.* are misrepeported modname = 'new' __import__(modname) themodule = sys.modules[modname] try: klass = getattr(themodule, name) except AttributeError, a: # print themodule, name, obj, type(obj) raise pickle.PicklingError("Can't pickle builtin %s" % obj) else: raise except (ImportError, KeyError, AttributeError): if typ == types.TypeType or typ == types.ClassType: sendRef = False else: #we can't deal with this raise else: if klass is not obj and (typ == types.TypeType or typ == types.ClassType): sendRef = False if not sendRef: #note: Third party types might crash this - add better checks! 
d = dict(obj.__dict__) #copy dict proxy to a dict if not isinstance(d.get('__dict__', None), property): # don't extract dict that are properties d.pop('__dict__',None) d.pop('__weakref__',None) # hack as __new__ is stored differently in the __dict__ new_override = d.get('__new__', None) if new_override: d['__new__'] = obj.__new__ self.save_reduce(type(obj),(obj.__name__,obj.__bases__, d),obj=obj) #print 'internal reduce dask %s %s' % (obj, d) return if self.proto >= 2: code = _extension_registry.get((modname, name)) if code: assert code > 0 if code <= 0xff: write(pickle.EXT1 + chr(code)) elif code <= 0xffff: write("%c%c%c" % (pickle.EXT2, code&0xff, code>>8)) else: write(pickle.EXT4 + pack("<i", code)) return write(pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj) dispatch[types.ClassType] = save_global dispatch[types.TypeType] = save_global def save_instancemethod(self, obj): #Memoization rarely is ever useful due to python bounding self.save_reduce(types.MethodType, (obj.im_func, obj.im_self,obj.im_class), obj=obj) dispatch[types.MethodType] = save_instancemethod def save_inst_logic(self, obj): """Inner logic to save instance. Based off pickle.save_inst Supports __transient__""" cls = obj.__class__ memo = self.memo write = self.write save = self.save if hasattr(obj, '__getinitargs__'): args = obj.__getinitargs__() len(args) # XXX Assert it's a sequence pickle._keep_alive(args, memo) else: args = () write(pickle.MARK) if self.bin: save(cls) for arg in args: save(arg) write(pickle.OBJ) else: for arg in args: save(arg) write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n') self.memoize(obj) try: getstate = obj.__getstate__ except AttributeError: stuff = obj.__dict__ #remove items if transient if hasattr(obj, '__transient__'): transient = obj.__transient__ stuff = stuff.copy() for k in list(stuff.keys()): if k in transient: del stuff[k] else: stuff = getstate() pickle._keep_alive(stuff, memo) save(stuff) write(pickle.BUILD) def save_inst(self, obj): # Hack to detect PIL Image instances without importing Imaging # PIL can be loaded with multiple names, so we don't check sys.modules for it if hasattr(obj,'im') and hasattr(obj,'palette') and 'Image' in obj.__module__: self.save_image(obj) else: self.save_inst_logic(obj) dispatch[types.InstanceType] = save_inst def save_property(self, obj): # properties not correctly saved in python self.save_reduce(property, (obj.fget, obj.fset, obj.fdel, obj.__doc__), obj=obj) dispatch[property] = save_property def save_itemgetter(self, obj): """itemgetter serializer (needed for namedtuple support)""" class Dummy: def __getitem__(self, item): return item items = obj(Dummy()) if not isinstance(items, tuple): items = (items, ) return self.save_reduce(operator.itemgetter, items) if type(operator.itemgetter) is type: dispatch[operator.itemgetter] = save_itemgetter def save_attrgetter(self, obj): """attrgetter serializer""" class Dummy(object): def __init__(self, attrs, index=None): self.attrs = attrs self.index = index def __getattribute__(self, item): attrs = object.__getattribute__(self, "attrs") index = object.__getattribute__(self, "index") if index is None: index = len(attrs) attrs.append(item) else: attrs[index] = ".".join([attrs[index], item]) return type(self)(attrs, index) attrs = [] obj(Dummy(attrs)) return self.save_reduce(operator.attrgetter, tuple(attrs)) if type(operator.attrgetter) is type: dispatch[operator.attrgetter] = save_attrgetter def save_reduce(self, func, args, state=None, listitems=None, dictitems=None, obj=None): 
"""Modified to support __transient__ on new objects Change only affects protocol level 2 (which is always used by PiCloud""" # Assert that args is a tuple or None if not isinstance(args, types.TupleType): raise pickle.PicklingError("args from reduce() should be a tuple") # Assert that func is callable if not hasattr(func, '__call__'): raise pickle.PicklingError("func from reduce should be callable") save = self.save write = self.write # Protocol 2 special case: if func's name is __newobj__, use NEWOBJ if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__": #Added fix to allow transient cls = args[0] if not hasattr(cls, "__new__"): raise pickle.PicklingError( "args[0] from __newobj__ args has no __new__") if obj is not None and cls is not obj.__class__: raise pickle.PicklingError( "args[0] from __newobj__ args has the wrong class") args = args[1:] save(cls) #Don't pickle transient entries if hasattr(obj, '__transient__'): transient = obj.__transient__ state = state.copy() for k in list(state.keys()): if k in transient: del state[k] save(args) write(pickle.NEWOBJ) else: save(func) save(args) write(pickle.REDUCE) if obj is not None: self.memoize(obj) # More new special cases (that work with older protocols as # well): when __reduce__ returns a tuple with 4 or 5 items, # the 4th and 5th item should be iterators that provide list # items and dict items (as (key, value) tuples), or None. if listitems is not None: self._batch_appends(listitems) if dictitems is not None: self._batch_setitems(dictitems) if state is not None: #print 'obj %s has state %s' % (obj, state) save(state) write(pickle.BUILD) def save_xrange(self, obj): """Save an xrange object in python 2.5 Python 2.6 supports this natively """ range_params = xrange_params(obj) self.save_reduce(_build_xrange,range_params) #python2.6+ supports xrange pickling. some py2.5 extensions might as well. 
We just test it try: xrange(0).__reduce__() except TypeError: #can't pickle -- use PiCloud pickler dispatch[xrange] = save_xrange def save_partial(self, obj): """Partial objects do not serialize correctly in python2.x -- this fixes the bugs""" self.save_reduce(_genpartial, (obj.func, obj.args, obj.keywords)) if sys.version_info < (2,7): #2.7 supports partial pickling dispatch[partial] = save_partial def save_file(self, obj): """Save a file""" import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute if not hasattr(obj, 'name') or not hasattr(obj, 'mode'): raise pickle.PicklingError("Cannot pickle files that do not map to an actual file") if obj is sys.stdout: return self.save_reduce(getattr, (sys,'stdout'), obj=obj) if obj is sys.stderr: return self.save_reduce(getattr, (sys,'stderr'), obj=obj) if obj is sys.stdin: raise pickle.PicklingError("Cannot pickle standard input") if hasattr(obj, 'isatty') and obj.isatty(): raise pickle.PicklingError("Cannot pickle files that map to tty objects") if 'r' not in obj.mode: raise pickle.PicklingError("Cannot pickle files that are not opened for reading") name = obj.name try: fsize = os.stat(name).st_size except OSError: raise pickle.PicklingError("Cannot pickle file %s as it cannot be stat" % name) if obj.closed: #create an empty closed string io retval = pystringIO.StringIO("") retval.close() elif not fsize: #empty file retval = pystringIO.StringIO("") try: tmpfile = file(name) tst = tmpfile.read(1) except IOError: raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name) tmpfile.close() if tst != '': raise pickle.PicklingError("Cannot pickle file %s as it does not appear to map to a physical, real file" % name) else: try: tmpfile = file(name) contents = tmpfile.read() tmpfile.close() except IOError: raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name) retval = pystringIO.StringIO(contents) curloc = obj.tell() retval.seek(curloc) retval.name = name self.save(retval) #save stringIO self.memoize(obj) dispatch[file] = save_file """Special functions for Add-on libraries""" def inject_numpy(self): numpy = sys.modules.get('numpy') if not numpy or not hasattr(numpy, 'ufunc'): return self.dispatch[numpy.ufunc] = self.__class__.save_ufunc numpy_tst_mods = ['numpy', 'scipy.special'] def save_ufunc(self, obj): """Hack function for saving numpy ufunc objects""" name = obj.__name__ for tst_mod_name in self.numpy_tst_mods: tst_mod = sys.modules.get(tst_mod_name, None) if tst_mod: if name in tst_mod.__dict__: self.save_reduce(_getobject, (tst_mod_name, name)) return raise pickle.PicklingError('cannot save %s. Cannot resolve what module it is defined in' % str(obj)) def inject_email(self): """Block email LazyImporters from being saved""" email = sys.modules.get('email') if not email: return self.dispatch[email.LazyImporter] = self.__class__.save_unsupported def inject_addons(self): """Plug in system. 
Register additional pickling functions if modules already loaded""" self.inject_numpy() self.inject_email() """Python Imaging Library""" def save_image(self, obj): if not obj.im and obj.fp and 'r' in obj.fp.mode and obj.fp.name \ and not obj.fp.closed and (not hasattr(obj, 'isatty') or not obj.isatty()): #if image not loaded yet -- lazy load self.save_reduce(_lazyloadImage,(obj.fp,), obj=obj) else: #image is loaded - just transmit it over self.save_reduce(_generateImage, (obj.size, obj.mode, obj.tostring()), obj=obj) """ def memoize(self, obj): pickle.Pickler.memoize(self, obj) if printMemoization: print 'memoizing ' + str(obj) """ # Shorthands for legacy support def dump(obj, file, protocol=2): CloudPickler(file, protocol).dump(obj) def dumps(obj, protocol=2): file = StringIO() cp = CloudPickler(file,protocol) cp.dump(obj) #print 'cloud dumped', str(obj), str(cp.modules) return file.getvalue() #hack for __import__ not working as desired def subimport(name): __import__(name) return sys.modules[name] # restores function attributes def _restore_attr(obj, attr): for key, val in attr.items(): setattr(obj, key, val) return obj def _get_module_builtins(): return pickle.__builtins__ def print_exec(stream): ei = sys.exc_info() traceback.print_exception(ei[0], ei[1], ei[2], None, stream) def _modules_to_main(modList): """Force every module in modList to be placed into main""" if not modList: return main = sys.modules['__main__'] for modname in modList: if type(modname) is str: try: mod = __import__(modname) except Exception, i: #catch all... sys.stderr.write('warning: could not import %s\n. Your function may unexpectedly error due to this import failing; \ A version mismatch is likely. Specific error was:\n' % modname) print_exec(sys.stderr) else: setattr(main,mod.__name__, mod) else: #REVERSE COMPATIBILITY FOR CLOUD CLIENT 1.5 (WITH EPD) #In old version actual module was sent setattr(main,modname.__name__, modname) #object generators: def _build_xrange(start, step, len): """Built xrange explicitly""" return xrange(start, start + step*len, step) def _genpartial(func, args, kwds): if not args: args = () if not kwds: kwds = {} return partial(func, *args, **kwds) def _fill_function(func, globals, defaults, dict): """ Fills in the rest of function data into the skeleton function object that were created via _make_skel_func(). """ func.func_globals.update(globals) func.func_defaults = defaults func.func_dict = dict return func def _make_cell(value): return (lambda: value).func_closure[0] def _reconstruct_closure(values): return tuple([_make_cell(v) for v in values]) def _make_skel_func(code, closures, base_globals = None): """ Creates a skeleton function object that contains just the provided code and the correct number of cells in func_closure. All other func attributes (e.g. func_globals) are empty. """ closure = _reconstruct_closure(closures) if closures else None if base_globals is None: base_globals = {} base_globals['__builtins__'] = __builtins__ return types.FunctionType(code, base_globals, None, None, closure) """Constructors for 3rd party libraries Note: These can never be renamed due to client compatibility issues""" def _getobject(modname, attribute): mod = __import__(modname, fromlist=[attribute]) return mod.__dict__[attribute] def _generateImage(size, mode, str_rep): """Generate image from string representation""" import Image i = Image.new(mode, size) i.fromstring(str_rep) return i def _lazyloadImage(fp): import Image fp.seek(0) #works in almost any case return Image.open(fp)
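# A minimal usage sketch (not part of the original module): CloudPickler output
# is readable by the stock unpickler, so a lambda can be shipped with dumps()
# and restored with plain pickle.loads(), as the module docstring describes.
if __name__ == '__main__':
    _payload = dumps(lambda x: x + 1)    # serialize a lambda defined in __main__
    _restored = pickle.loads(_payload)   # the standard unpickler is sufficient
    assert _restored(41) == 42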
"""Weather data coordinator for the OpenWeatherMap (OWM) service.""" from datetime import timedelta import logging import async_timeout from pyowm.commons.exceptions import APIRequestError, UnauthorizedError from homeassistant.components.weather import ( ATTR_CONDITION_CLEAR_NIGHT, ATTR_CONDITION_SUNNY, ATTR_FORECAST_CONDITION, ATTR_FORECAST_PRECIPITATION, ATTR_FORECAST_PRECIPITATION_PROBABILITY, ATTR_FORECAST_PRESSURE, ATTR_FORECAST_TEMP, ATTR_FORECAST_TEMP_LOW, ATTR_FORECAST_TIME, ATTR_FORECAST_WIND_BEARING, ATTR_FORECAST_WIND_SPEED, ) from homeassistant.helpers import sun from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from homeassistant.util import dt from .const import ( ATTR_API_CLOUDS, ATTR_API_CONDITION, ATTR_API_DEW_POINT, ATTR_API_FEELS_LIKE_TEMPERATURE, ATTR_API_FORECAST, ATTR_API_HUMIDITY, ATTR_API_PRECIPITATION_KIND, ATTR_API_PRESSURE, ATTR_API_RAIN, ATTR_API_SNOW, ATTR_API_TEMPERATURE, ATTR_API_UV_INDEX, ATTR_API_WEATHER, ATTR_API_WEATHER_CODE, ATTR_API_WIND_BEARING, ATTR_API_WIND_SPEED, CONDITION_CLASSES, DOMAIN, FORECAST_MODE_DAILY, FORECAST_MODE_HOURLY, FORECAST_MODE_ONECALL_DAILY, FORECAST_MODE_ONECALL_HOURLY, WEATHER_CODE_SUNNY_OR_CLEAR_NIGHT, ) _LOGGER = logging.getLogger(__name__) WEATHER_UPDATE_INTERVAL = timedelta(minutes=10) class WeatherUpdateCoordinator(DataUpdateCoordinator): """Weather data update coordinator.""" def __init__(self, owm, latitude, longitude, forecast_mode, hass): """Initialize coordinator.""" self._owm_client = owm self._latitude = latitude self._longitude = longitude self._forecast_mode = forecast_mode self._forecast_limit = None if forecast_mode == FORECAST_MODE_DAILY: self._forecast_limit = 15 super().__init__( hass, _LOGGER, name=DOMAIN, update_interval=WEATHER_UPDATE_INTERVAL ) async def _async_update_data(self): data = {} with async_timeout.timeout(20): try: weather_response = await self._get_owm_weather() data = self._convert_weather_response(weather_response) except (APIRequestError, UnauthorizedError) as error: raise UpdateFailed(error) from error return data async def _get_owm_weather(self): """Poll weather data from OWM.""" if ( self._forecast_mode == FORECAST_MODE_ONECALL_HOURLY or self._forecast_mode == FORECAST_MODE_ONECALL_DAILY ): weather = await self.hass.async_add_executor_job( self._owm_client.one_call, self._latitude, self._longitude ) else: weather = await self.hass.async_add_executor_job( self._get_legacy_weather_and_forecast ) return weather def _get_legacy_weather_and_forecast(self): """Get weather and forecast data from OWM.""" interval = self._get_forecast_interval() weather = self._owm_client.weather_at_coords(self._latitude, self._longitude) forecast = self._owm_client.forecast_at_coords( self._latitude, self._longitude, interval, self._forecast_limit ) return LegacyWeather(weather.weather, forecast.forecast.weathers) def _get_forecast_interval(self): """Get the correct forecast interval depending on the forecast mode.""" interval = "daily" if self._forecast_mode == FORECAST_MODE_HOURLY: interval = "3h" return interval def _convert_weather_response(self, weather_response): """Format the weather response correctly.""" current_weather = weather_response.current forecast_weather = self._get_forecast_from_weather_response(weather_response) return { ATTR_API_TEMPERATURE: current_weather.temperature("celsius").get("temp"), ATTR_API_FEELS_LIKE_TEMPERATURE: current_weather.temperature("celsius").get( "feels_like" ), ATTR_API_DEW_POINT: self._fmt_dewpoint(current_weather.dewpoint), 
ATTR_API_PRESSURE: current_weather.pressure.get("press"), ATTR_API_HUMIDITY: current_weather.humidity, ATTR_API_WIND_BEARING: current_weather.wind().get("deg"), ATTR_API_WIND_SPEED: current_weather.wind().get("speed"), ATTR_API_CLOUDS: current_weather.clouds, ATTR_API_RAIN: self._get_rain(current_weather.rain), ATTR_API_SNOW: self._get_snow(current_weather.snow), ATTR_API_PRECIPITATION_KIND: self._calc_precipitation_kind( current_weather.rain, current_weather.snow ), ATTR_API_WEATHER: current_weather.detailed_status, ATTR_API_CONDITION: self._get_condition(current_weather.weather_code), ATTR_API_UV_INDEX: current_weather.uvi, ATTR_API_WEATHER_CODE: current_weather.weather_code, ATTR_API_FORECAST: forecast_weather, } def _get_forecast_from_weather_response(self, weather_response): forecast_arg = "forecast" if self._forecast_mode == FORECAST_MODE_ONECALL_HOURLY: forecast_arg = "forecast_hourly" elif self._forecast_mode == FORECAST_MODE_ONECALL_DAILY: forecast_arg = "forecast_daily" return [ self._convert_forecast(x) for x in getattr(weather_response, forecast_arg) ] def _convert_forecast(self, entry): forecast = { ATTR_FORECAST_TIME: dt.utc_from_timestamp( entry.reference_time("unix") ).isoformat(), ATTR_FORECAST_PRECIPITATION: self._calc_precipitation( entry.rain, entry.snow ), ATTR_FORECAST_PRECIPITATION_PROBABILITY: ( round(entry.precipitation_probability * 100) ), ATTR_FORECAST_PRESSURE: entry.pressure.get("press"), ATTR_FORECAST_WIND_SPEED: entry.wind().get("speed"), ATTR_FORECAST_WIND_BEARING: entry.wind().get("deg"), ATTR_FORECAST_CONDITION: self._get_condition( entry.weather_code, entry.reference_time("unix") ), ATTR_API_CLOUDS: entry.clouds, } temperature_dict = entry.temperature("celsius") if "max" in temperature_dict and "min" in temperature_dict: forecast[ATTR_FORECAST_TEMP] = entry.temperature("celsius").get("max") forecast[ATTR_FORECAST_TEMP_LOW] = entry.temperature("celsius").get("min") else: forecast[ATTR_FORECAST_TEMP] = entry.temperature("celsius").get("temp") return forecast @staticmethod def _fmt_dewpoint(dewpoint): if dewpoint is not None: return round(dewpoint / 100, 1) return None @staticmethod def _get_rain(rain): """Get rain data from weather data.""" if "all" in rain: return round(rain["all"], 2) if "1h" in rain: return round(rain["1h"], 2) return 0 @staticmethod def _get_snow(snow): """Get snow data from weather data.""" if snow: if "all" in snow: return round(snow["all"], 2) if "1h" in snow: return round(snow["1h"], 2) return 0 @staticmethod def _calc_precipitation(rain, snow): """Calculate the precipitation.""" rain_value = 0 if WeatherUpdateCoordinator._get_rain(rain) != 0: rain_value = WeatherUpdateCoordinator._get_rain(rain) snow_value = 0 if WeatherUpdateCoordinator._get_snow(snow) != 0: snow_value = WeatherUpdateCoordinator._get_snow(snow) return round(rain_value + snow_value, 2) @staticmethod def _calc_precipitation_kind(rain, snow): """Determine the precipitation kind.""" if WeatherUpdateCoordinator._get_rain(rain) != 0: if WeatherUpdateCoordinator._get_snow(snow) != 0: return "Snow and Rain" return "Rain" if WeatherUpdateCoordinator._get_snow(snow) != 0: return "Snow" return "None" def _get_condition(self, weather_code, timestamp=None): """Get weather condition from weather data.""" if weather_code == WEATHER_CODE_SUNNY_OR_CLEAR_NIGHT: if timestamp: timestamp = dt.utc_from_timestamp(timestamp) if sun.is_up(self.hass, timestamp): return ATTR_CONDITION_SUNNY return ATTR_CONDITION_CLEAR_NIGHT return [k for k, v in CONDITION_CLASSES.items() if weather_code in 
v][0] class LegacyWeather: """Class to harmonize weather data model for hourly, daily and One Call APIs.""" def __init__(self, current_weather, forecast): """Initialize weather object.""" self.current = current_weather self.forecast = forecast
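# An interpretive note (not part of the original integration): CONDITION_CLASSES
# maps each Home Assistant condition string to the collection of OWM weather
# codes it covers, so _get_condition() performs a reverse lookup,
#
#     [k for k, v in CONDITION_CLASSES.items() if weather_code in v][0]
#
# with one special case: the "sunny or clear night" code is resolved against the
# sun position to pick ATTR_CONDITION_SUNNY or ATTR_CONDITION_CLEAR_NIGHT.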
# coding: utf-8 # In[1]: # # LF test with noise # C = S + N, where noise N = sigma^2 I, where I is the identity matrix # # We generate 20 values of parameters x for function logLF(x) # We chose our parameters around the value of Boltzmann code generated C_3 # CAMB generated C_3 equals 5.88275e-10 # # 1. Set all C_l = 0 except C_3 DONE # # 2. LF is based on a matrix C that only has a P_3 term. DONE # # 3. Add to that matrix a white noise term (sigma^2 on the diagonal). DONE # # 4. LF now has *exactly two* free parameters, C_3 and sigma^2. DONE # # 5. What is LF vs C_3 (at, perhaps, a couple values of sigma^2)? # # 6. over-plot |a_3m|^2 values # # # In[ ]: # In[ ]: # In[ ]: # In[2]: get_ipython().magic(u'matplotlib inline') import math import matplotlib.pyplot as plt import numpy as np import healpy as hp import pyfits as pf import astropy as ap import os from scipy.special import eval_legendre ##special scipy function # In[3]: # # Review of Likelihood Formalism: # # -2 loglikelihood is # -2 ln L \propto m^T C^-1 m + lnDet C # where C = S + N # We are working with noiseless maps, N = 0, so C = S # # In real space, # data: the temperature map, # parameters: theoretical CAMB generated C_l, C^theory_l # # m = array of temperature pixels # S = S_ij # N = diagonal noise (but here it is noiseless! N=0) # # In spherical harmonic space # data: the Healpix anafast outputarray of a_lm (or similarly, \hat{C}_l ) # parameters: the theoretical C_l # # m = a_lm coefficients # S is diagonal (C_2, C_3, etc.) # N is non-sparse matrix (but here it is noiseless! N=0) # # # NOTE: The correct standard is to systematically remove the monopole, dipole terms l=0,l=1 # Also, we use in the following lmax = 2*nside # In[4]: cd ~/Desktop/CMBintheLikeHoodz/Likelihood_Comparison # In[5]: camb1 = "camb_nside16_lmax32_alms.fits" camb2 = "camb_nside16_lmax32_map.fits" camb3 = "camb_nside16_lmax32_scalcls.fits" planck1 = "100GHz_nside16_lmax32_cls.fits" planck2 = "100GHz_nside16_lmax32_cmb_alm.fits" planck3 = "100GHz_nside16_lmax32_sky_alm.fits" planck4 = "100GHz_nside16_lmax32_skymap.fits" nside = 16 # In[6]: npix = 12*(nside**2) #total number of pixels, npix LMAX = ((2*nside)) #maximum l of the power spectrum C_l heal_npix = hp.nside2npix(nside) # Healpix calculated npix print "The total number of pixels is " + str(npix) print "The maximum ell of the power spectrum C_l set to lmax = 2*nside " +str(LMAX) print "Healpix tells me total number of pixels npix is equal to " + str(heal_npix) # In[7]: # # Begin with a Munich Planck-simulated map, and CAMB Boltzmann-code generated C_l values # # In[8]: # Theoretical scalar C_l array, CAMB # # open a FITS file, theoretical C_l values generated by CAMB # type()=pyfits.hdu.hdulist.HDUList cl_open = pf.open(camb3) # recall camb3 = "camb_nside16_lmax32_scalcls.fits" # In[9]: theoryCls_arr1 = cl_open[1].data # print theoryCls_arr1[:10] # Recall there are four columns: temp, E pol, B pol, grad-temp cross terms # first two values are zero, i.e. 
monopole, dipole # XXX.field() references columns by 0-index # field(0) is temperature values # all Cl scalar temp values put into ndarray # type()=numpy.ndarray # In[10]: cltemps = theoryCls_arr1.field(0) #print cltemps #print "The length of the array of theoretical Cl's is " +str(len(cltemps)) #print "The array contains [C_0, C_1, C_2,..., C_" +str(len(cltemps)-1) + "]" #print type(cltemps)=numpy.ndarray # In[11]: # remove monopole l=0 and dipole l=1 theoryCl = cltemps[2:] # len(theoryCl) = 31 # print theoryCl # theoryCl is np.ndarray of theoretical [C_2, C_3, C_4, ..., C_32] # In[12]: # Our input data is Gaerching generated, noiseless full-sky map # Temperature map: here we use Planck simulated map from Munich, not CAMB map # http://gavo.mpa-garching.mpg.de/planck/ # # Read in with Healpy routine/function # # Use planck4 = "100GHz_nside16_lmax32_skymap.fits" # This is a simulated data, 100GHz (where CMB dominates), no foregrounds # mapread_camb2 = hp.read_map(camb2) # Healpix routine, input the sky map # In[13]: # hp.mollview(mapread_camb2) # visualization of full-sky CMB map, nside=16, lmax=32 # In[14]: # The uploaded temperature map is mapread_planck4 = hp.read_map(planck4) #print type(mapread_camb2) # type(mapread_planck4) = np.ndarray #print mapread_camb2.shape # mapread_planck4.shape = (3072, ) = (N_pix, ) # # rename array for convenience tempval = mapread_camb2 #print tempval # In[15]: # Next, we use healpy map2alm to tranform to alm values # Our input data is CAMB generated, noiseless full-sky map # We calculate an array of a_lm from this by using Healpix map2alm, a subroutine of anafast # # map2alm only outputs m >=0 values, because m = -l values are equivalent to m = +l values # # Using map2alm, the length of the alm array is expected to be: # (mmax * (2 * lmax + 1 - mmax)) / 2 + lmax + 1)" # # For mmax = lmax, this is l(l+1)/2 + l + 1 # i.e. # l = 0, there is 1 # l = 1, there is 3 # l = 2, there is 6 # l = 3, there is 10 # l = 4, there is 15 # etc. almarr = hp.map2alm(mapread_camb2) # This is an array of a_lm values #print "The array of spherical harmonic coefficients a_lm is" #print almarr #print "The arr.shape is " + str(almarr.shape) #print "The length of a_lm array is " + str(len(almarr)) # #print "For l=3, map2alm gives (a_00, a_10, a_11, a_20, a_21, a_22, a_30, a_31, a_32, a_33)" #print "However, this is NOT the order of the output! See below" # In the Fortran F90 subroutines, complex alm are stored in an array that has # two dimensions to contain coefficients for positive and negative m values. # Healpy doesn't do this....I think #print "=============================" #print "=============================" #print "Check indices with healpy.sphtfunc.Alm.getidx(lmax, l, m)" #print "Default ordering of healpy.map2alm() output is " #print "(0,0), (1,0), ..., (lmax, 0)," #print "(1,1), (2,1), ...., (lmax, 1)," #print "(2,2), .... (lmax, 2),(3,3), ...., (lmax, 3), etc. , .... (lmax, lmax)." # In[16]: # ========================== # DEMONSTRATION # Notice how a_lm is indexed # ========================== mmm = np.arange(12) # define a map, i.e. an array of 12 "pixels" lmaxxx = 4 alm = hp.map2alm(mmm, lmax=lmaxxx) # spherical harmonic transform lm = hp.map2alm(mmm, lmax=lmaxxx) # spherical harmonic transform #print(alm) #print(alm.shape) # So alm is actually a 1D vector. # How is alm indexed? 
l, m = hp.Alm.getlm(lmax=lmaxxx) #print(l) #print(m) #print "The l values are "+str(l) #print "The m values are "+str(m) #print " (l,m) is in order " +str(list(zip(l,m))) # # l, m = hp.Alm.getlm(lmax=lmax) # print(l) # [0 1 2 1 2 2] # print(m) # [0 0 0 1 1 2] # # # So, for l = 2, m is [0, 1, 2]. # # ========================== # Notice how a_lm is indexed # ========================== # # # # In[17]: # Check with healpy.sphtfunc.Alm.getidx(lmax, l, m) # Returns index corresponding to (l,m) in an array describing alm up to lmax. # ell, emm = hp.Alm.getlm(lmax=32) #print "len(ell) is " +str(len(ell)) #print "len(emm) is "+str(len(emm)) #print "l values are "+str(ell[:10]) #print "m values are "+str(emm[:10]) pairs = list(zip(ell, emm)) # put values together in pairs, zip() ellemm = np.vstack((ell,emm)).T # equivalent to list(zip(ell,emm)), but uses numpy throughout #print "Indices for a_lm for lmax (l, m) are:" #print str(pairs[:50]) # The expected output # In[18]: #print ellemm[:10] # In[19]: # # For our first test, mode l = 3, we need to access a_lm coefficients a_30, a_31, a_32, a_33 # To find this for lmax = 32, we use # healpy.sphtfunc.Alm.getidx(lmax, l, m) # Returns index corresponding to (l,m) in an array describing alm up to lmax. # # Find the indices index_a30 = hp.Alm.getidx(lmax=32, l=3, m=0) index_a31 = hp.Alm.getidx(lmax=32, l=3, m=1) index_a32 = hp.Alm.getidx(lmax=32, l=3, m=2) index_a33 = hp.Alm.getidx(lmax=32, l=3, m=3) # In[20]: #print "Index a_30 is " +str(index_a30) #print "Index a_31 is "+str(index_a31) #print "Index a_32 is "+str(index_a32) #print "Index a_33 is "+str(index_a33) # In[21]: # # Create an array with only the values a_3m, i.e. a_30, a_31, a_32, a_33 # # First convert the array of alm coefficients into a real # realalm = almarr.real # #print realalm[:36] # In[22]: empty_almlist = [] # a30 = realalm[3] a31 = realalm[35] a32 = realalm[66] a33 = realalm[96] # #print "a30 is " + str(a30) #print "a31 is " + str(a31) #print "a32 is " + str(a32) #print "a33 is " + str(a33) # #print str(pairs[3]) # Check with our output above #print str(pairs[35]) #print str(pairs[66]) #print str(pairs[96]) # empty_almlist.append(a30) empty_almlist.append(a31) empty_almlist.append(a32) empty_almlist.append(a33) # #print empty_almlist # In[23]: # create array of real-valued alm coefficients, a30 a31 a32 a33 realalm3 = np.asarray(empty_almlist) # np.asarray() converts input into an array #print realalm3 # In[24]: # Repeat the above procedure for mode l = 4, i.e. 
a40 a41 a42 a43 a44 # Find the indices index_a40 = hp.Alm.getidx(lmax=32, l=4, m=0) index_a41 = hp.Alm.getidx(lmax=32, l=4, m=1) index_a42 = hp.Alm.getidx(lmax=32, l=4, m=2) index_a43 = hp.Alm.getidx(lmax=32, l=4, m=3) index_a44 = hp.Alm.getidx(lmax=32, l=4, m=4) # #print "Index a_40 is " +str(index_a40) #print "Index a_41 is "+str(index_a41) #print "Index a_42 is "+str(index_a42) #print "Index a_43 is "+str(index_a43) #print "Index a_44 is "+str(index_a44) # # Check with the above ouput #print str(pairs[4]) #print str(pairs[36]) #print str(pairs[67]) #print str(pairs[97]) #print str(pairs[126]) # emptylistalm2 = [] # #print realalm # a40 = realalm[4] a41 = realalm[36] a42 = realalm[67] a43 = realalm[97] a44 = realalm[127] # #print "a40 is " + str(a40) #print "a41 is " + str(a41) #print "a42 is " + str(a42) #print "a43 is " + str(a43) #print "a44 is " + str(a44) # emptylistalm2.append(a40) emptylistalm2.append(a41) emptylistalm2.append(a42) emptylistalm2.append(a43) emptylistalm2.append(a44) # #print emptylistalm2 # In[25]: # create array of real-valued alm coefficients, a40 a41 a42 a43 a44 realalm4 = np.asarray(emptylistalm2) # np.asarray() converts input into an array #print realalm4 # In[26]: # Calculate (abs(alm))**2 i.e. |alm|^2 abs_alm3 = np.absolute(realalm3) abs_alm4 = np.absolute(realalm4) #print abs_alm3 #print abs_alm4 # Now calculate the squares element-wise, x**2 alm3_squared = abs_alm3**2 alm4_squared = abs_alm4**2 #print alm3_squared #print alm4_squared # In[27]: # For l = 3 test, we need theoretical value of C_3; ditto for l = 4 #print theoryCl C3 = theoryCl[1] #print "theory C_3 is " +str(C3) C4 = theoryCl[2] #print "theory C_4 is "+str(C4) # In[28]: # For lmax = 32, we must create an array of ell values, i.e. [0 1 2 3....31 32] ell = np.arange(33) #print ell # # Subtract the monopole and dipole, l=0, l=1 ellval = ell[2:] #print ellval # In[29]: # Calculate an array of (2*l + 1)C_l # i.e. 5*C_2, 7*C_3, 9*C_4, 11*C_5, 13*C_6, ... #print theoryCl for i in ellval: paramsCl = (2*ellval + 1)*theoryCl # define array (2*l + 1)C_l #print paramsCl # In[30]: norm = ((2*ellval + 1))/(4*math.pi) #print norm # In[31]: anafastCl = hp.anafast(mapread_camb2, lmax=32) #len(anafastCl) = 33 # remove monopole and dipole values, l=0, l=1 hatCl = anafastCl[2:] #len() = 31, type() = np.ndarray hatC3 = hatCl[1] # index 0 = C2, 1 = C3, etc. hatC4 = hatCl[2] #print hatC3 #print hatC4 # In[32]: # # Add a_lm squared, |a_lm|^2 # #print "The values for |a_lm|^2 are : " #print "For |a_3m|**2 such that a_30, a_31, a_32, a_33: " #print str(alm3_squared) #print "And for |a_4m|**2 such that a_40, a_41, a_42, a_43, a_44: " #print str(alm4_squared) # In[33]: # ========================================================================= # # ========================================================================= # # Data: # tempval # the array of pixel values, (3072,) # realalm3 # array of alm values, a30, a31, a32, a33 # realalm4 # array of alm values, a40, a41, a42, a43, a44 # alm3_squared # array of |alm|^2, (abs(a3m))**2 # alm4_squared # array of |alm|^2, (abs(a4m))**2 # hatCl # array of anafast-calculated \hat{C}_l values, l=2 to l=32 # hatC3 # \hat{C}_3 value # hatC4 # \hat{C}_4 value # # Parameters: # theoryCl # array of Boltzmann code generated C_l, i.e. C^{theory}_l # paramsCl # array of (2*l + 1)C_l from l=2 to l=lmax # C3 # array of C_3 value # C4 # array of C_4 value # # Array of ell's: # ellval # array of l = 2 to l=lmax # # [2 3 4 ... 
31 32] # norm # array of (2*l+1)/4pi # # [5/4pi 7/4pi 9/4pi 11/4pi ... 63/4pi 65/4pi] # ========================================================================= # # ========================================================================= # In[34]: # # Next, create the matrix, n_i /cdot n_j # solely using Healpy routines, i.e. taking the dot product of the vectors # The result is "dotproductmatrix" # # npix = 3072 # In[35]: totalpix = np.arange(npix) # An array indexing the total number of pixels # In[36]: ## healpy.pixelfunc.pix2vec(nside, ipix, nest=False) ## ## will give three arrays ## arrays of all x values, all y values, all z values ## RING scheme default # len()=3 # type()=tuple # # vecval = hp.pix2vec(nside, totalpix) #Nside = 16, type()=tuple # In[37]: vecvalx = vecval[0] #shape (3072,) vecvaly = vecval[1] vecvalz = vecval[2] # In[38]: # First arrange arrays vertically # numpy.vstack = Stack arrays in sequence vertically (row wise), input sequence of arrays totalvecval = np.vstack((vecvalx, vecvaly, vecvalz)) #type()=numpy.ndarray # In[39]: trans = totalvecval.T #transpose # In[40]: dotproductmatrix = trans.dot(totalvecval) #take the dot product # dotproductmatrix.shape = (npix, npix) = (3072, 3072) # type(dotproductmatrix) = np.ndarray # In[41]: # ========================================================= # ========================================================= # # \Sum_l (2*l + 1)/4pi C^th_l P_l (dotproductmatrix) # sum from l=2 to l=lmax # # arrays l = [2 3 4 .... lmax] # C_l = [C_2 C_3 .... C_lmax] # # The correct way to do the summation: # # Step 1: calculate the matrix # M = dotproductmatrix # # Step 2: evaluate the function P_l(x) for each entry of the matrix # OUTPUT: [P_2(M) P_3(M) P_4(M) .... P_lmax(M) ] # # Step 3: (2*l +1)/4pi from l=2 to l=lmax # [5/4pi 7/4pi 9/4pi 11/4pi .... 65/4pi ] # # Step 4: multiply # [5/4pi*P_2(M) + 7/4pi*P_3(M) +...... + 65/4pi*P_32(M)] # # # Step 5: multiply by theoretical CAMB values, [C_2 C_3 C_31 C_32] # [5/4pi**C_2* P_2(M) + 7/4pi*C_3* P_3(M) +...... + 65/4pi*C_32* P_32(M)] # # Step 6: This is an array of S_ij for each theory C_l, l=2 to l=32 # # # # ========================================================= # ========================================================= # In[42]: # ========================================================= # ========================================================= # # Now calculate the likelihood # -2lnL \propto m^T C^-1 m + ln det C + N ln (2pi) # # First term, m^T C^-1 m is the "model fit term" # Second term, lndetC is the "complexity penalty" # Third term, N ln 2pi, a constant # # m = tempval # C = Sij # # STEP 1: do inverse of Sij # invSij = np.linalg.inv(Sij) # # STEP 2: do matrix mulplication, m.T*inv(C)*m # # lnL_modelfit_terms = np.array([np.dot(tempval.T , np.dot(invSij[i] , tempval) ) for i in range(invSij.shape[0])]) # # STEP 3: do logdet Sij # # logdetC = np.linalg.slogdet(Sij) #computes sign and log det C # logdetC[1] # # STEP 4: compute N_pix * 2pi # # Npix2pi = (npix)*2*math.pi # # Step 5: -2loglikelihood = m.T*inv(C)*m + logdetC[1] + Npix2pi # # ========================================================= # ========================================================= # In[43]: # CODE BOTTLENECK! # # Evaluate Legendre from l=2 to l=lmax for each matrix entry # [P_2(M) P_3(M) P_4(M) .... 
P_lmax(M) ] # # WITHOUT BROADCASTING, one would do something like # PlMat = [] # for i in ellval: # PlMat.append( eval_legendre(i, dotproductmatrix) ) # # # With broadcasting, we use PlMat = eval_legendre(ellval[:, None, None], dotproductmatrix) # PlMat = [P_2(M) P_3(M) P_4(M) .... P_lmax(M) ] # PlMat is an array, len()=31 of 31 3072 by 3072 matrices # PlMat.shape = (31, 3072, 3072) # In[44]: # multiply PlMat by (2*l+1)/4pi, i.e. norm norm_matrix = norm[:, None, None] * PlMat # [5/4pi * P_2(M) 7/4pi * P_3(M) .... 65/4pi * P_32(M)] #print PlMat.shape #print norm_matrix.shape # In[45]: ### Try l=2 to =3 ### Try l=3 ### could try l=0 plus 1, 2, 3 # In[ ]: # In[ ]: # In[46]: # JULY 1, 2015 # # # Here we define the LF # # Next we should writen the REAL-space log-likelihood, -2 lnLikefunction # this is a function of parameters # plot against parameters, theory C_l # # Likelihood function should use -2lnL /propto m^T C^-1 M + log det M # # In[47]: # TEST C_3 Likelihood # # The covariance matrix is a function of variable "x" where "x" is "C_3", an unknown parameter. # # Our covariance matrix is therefore S_ij = 7/4pi * x * P_3(matrix) # (We set l=3, i.e. l=3, and P_3) # # The LF is then a function of x, LF(x). This is the only parameter we vary. # # LF = -2loglikelihood /propto T^T inv(S_ij) T + log det (Sij) + N log (2pi) # # We then plot LF(x) vs. parameters x. # # In[48]: # define pixel-value arrays mT = np.matrix(tempval) # mT.shape = (1, 3072) m = np.matrix(tempval).T # m.shape = (3072, 1) Npix2pi = (npix)*2*math.pi # LF constant # In[49]: #print C3 # Boltzmann code CAMB output # In[50]: # generate a number of samples to plot for x # Boltzmann code value for C3 is 5.88275e-10 # start at 1e-10, end at 9e-10 # default samples generated is 50 # In[51]: vary_x_samples1 = np.linspace(1e-10, 9e-10, num=20 ) #set default num = 20 # In[52]: #print vary_x_samples1 # In[53]: # create Sij array, len()=31, l=2 to l=32 # Sij = norm_matrix * theoryCl[:, None, None] # [5/4pi*C_2*P_2(M) 7/4pi*C_3*P_3(M) .... 
65/4pi*C_32*P_32(M)] # In[54]: #print hatC3 # anafast generated C_l for l = 3 from sky map #print C3 # CAMB generated C_3, Boltzmann code output # In[55]: #print alm3_squared # a_3m extracted from sky map, absolute value, squared # In[56]: np.set_printoptions(threshold=100000) # Default is threshold=1000 ## Use this to print all values, disables corner printing # In[57]: # # The empirical variance from our map is Var(a3m) # # print alm3_squared # [ 8.84771791e-13 8.06516529e-18 3.85347491e-12 7.58705140e-11] # # Initialize the noise parameter sigma^2 to these values # The range is wide; try 5e-11 to 5 e-15 # sigma2 = np.logspace(-12, -16, num=30 ) #set default num = 30 #print sigma2 # In[58]: # For N matrix, set the identity id_mat = np.identity(3072) #print id_mat # This is a (3072, 3072) matrix noiseresult = sigma2[:, None, None] * id_mat[None, :, :] #print noiseresult # In[59]: correctmatrix = norm_matrix[0] + norm_matrix[1] #print correctmatrix # In[60]: tempp = np.random.normal(0.0, 1.0, 3072) # mean = 0, std = 1 = var = 1 def LogLikehood_wNoise_contour(param, sig): # param is our parameter, C_3 Sij = param * correctmatrix[None, :, :] Nij = sig * id_mat[None, :, :] # Format 7/4pi * param * P_3(M) where param is the parameter we vary, C_l # Sij.shape = (20, 3072, 3072) Cij = Sij + Nij #invCij = np.linalg.inv(Cij) logdetC = np.linalg.slogdet(Cij) # returns sign and determinant; use logdetC[1] # model_fit_terms = m^T C^-1 m model_fit_terms = np.array([np.dot(tempp.T , np.linalg.solve(Cij[i], tempp) ) for i in range(Cij.shape[0]) ]) return model_fit_terms + logdetC[1] + Npix2pi # In[62]: import pylab as pb import matplotlib.pyplot as plt vary_C3 = np.linspace(-0.04, 0.04, num=20) varysigma = np.linspace(0.8, 1.2, num=20) xxx = vary_C3 yyy = varysigma zzz = np.array([[LogLikehood_wNoise_contour(np.asarray(i), np.asarray(j)) for i in xxx] for j in yyy]) zzzreshaped = zzz.reshape(20,20) plt.figure() CS = plt.contour(xxx, yyy, zzzreshaped) plt.clabel(CS, inline=1, fontsize=10) plt.xlabel('C3 parameter, xmin = -0.04, xmax = 0.04') plt.ylabel('sigma^2 parameter, ymin= 0.8, ymax= 1.2') pb.show() plt.figure() levels = np.arange(22350.0, 22450.0, 10) CS = plt.contour(xxx, yyy, zzzreshaped, levels=levels) plt.clabel(CS, inline=1, fontsize=10) plt.xlabel('C3 parameter, xmin = -0.04, xmax = 0.04') plt.ylabel('sigma^2 parameter, ymin= 0.8, ymax= 1.2') pb.show() # In[ ]:
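# In[ ]:


# A small, self-contained sanity check of the -2 lnL recipe used above,
#     -2 lnL = m^T C^-1 m + ln det C + const,
# on a toy 4-pixel "map" with a diagonal covariance, so the linear algebra can
# be checked by hand. This is only a sketch of the building blocks used in
# LogLikehood_wNoise_contour -- np.linalg.solve (instead of an explicit
# inverse) and np.linalg.slogdet -- and the toy numbers are unrelated to the
# CMB matrices above. The constant term is dropped here, since it only offsets
# the surface and does not move the contours.

toy_m = np.random.normal(0.0, 1.0, 4)        # toy "temperature map", 4 pixels
toy_C = np.diag([2.0, 1.0, 0.5, 0.25])       # toy diagonal covariance

toy_fit = np.dot(toy_m, np.linalg.solve(toy_C, toy_m))   # m^T C^-1 m
toy_sign, toy_logdet = np.linalg.slogdet(toy_C)          # sign is +1 for a valid covariance
toy_neg2lnL = toy_fit + toy_logdet

# For a diagonal C this reduces to sum(m_i^2 / C_ii) + sum(ln C_ii):
toy_expected = np.sum(toy_m**2 / np.diag(toy_C)) + np.sum(np.log(np.diag(toy_C)))
print(np.isclose(toy_neg2lnL, toy_expected))  # expect: True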
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Affine bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib import linalg from tensorflow.contrib.distributions.python.ops import distribution_util from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector __all__ = [ "Affine", ] def _as_tensor(x, name): """Convenience to convert to `Tensor` or leave as `None`.""" return None if x is None else ops.convert_to_tensor(x, name=name) class Affine(bijector.Bijector): """Compute `Y = g(X; shift, scale) = scale @ X + shift`. Here `scale = c * I + diag(D1) + tril(L) + V @ diag(D2) @ V.T`. In TF parlance, the `scale` term is logically equivalent to: ```python scale = ( scale_identity_multiplier * tf.diag(tf.ones(d)) + tf.diag(scale_diag) + scale_tril + scale_perturb_factor @ diag(scale_perturb_diag) @ tf.transpose([scale_perturb_factor]) ) ``` The `scale` term is applied without necessarily materializing constituent matrices, i.e., the matmul is [matrix-free]( https://en.wikipedia.org/wiki/Matrix-free_methods) when possible. Examples: ```python # Y = X b = Affine() # Y = X + shift b = Affine(shift=[1., 2, 3]) # Y = 2 * I @ X.T + shift b = Affine(shift=[1., 2, 3], scale_identity_multiplier=2.) # Y = tf.diag(d1) @ X.T + shift b = Affine(shift=[1., 2, 3], scale_diag=[-1., 2, 1]) # Implicitly 3x3. # Y = (I + v * v.T) @ X.T + shift b = Affine(shift=[1., 2, 3], scale_perturb_factor=[[1., 0], [0, 1], [1, 1]]) # Y = (diag(d1) + v * diag(d2) * v.T) @ X.T + shift b = Affine(shift=[1., 2, 3], scale_diag=[1., 3, 3], # Implicitly 3x3. scale_perturb_diag=[2., 1], # Implicitly 2x2. scale_perturb_factor=[[1., 0], [0, 1], [1, 1]]) ``` """ def __init__(self, shift=None, scale_identity_multiplier=None, scale_diag=None, scale_tril=None, scale_perturb_factor=None, scale_perturb_diag=None, event_ndims=1, validate_args=False, name="affine"): """Instantiates the `Affine` bijector. This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments, giving the forward operation: ```none Y = g(X) = scale @ X + shift ``` where the `scale` term is logically equivalent to: ```python scale = ( scale_identity_multiplier * tf.diag(tf.ones(d)) + tf.diag(scale_diag) + scale_tril + scale_perturb_factor @ diag(scale_perturb_diag) @ tf.transpose([scale_perturb_factor]) ) ``` If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are specified then `scale += IdentityMatrix`. 
Otherwise specifying a `scale` argument has the semantics of `scale += Expand(arg)`, i.e., `scale_diag != None` means `scale += tf.diag(scale_diag)`. Args: shift: Floating-point `Tensor`. If this is set to `None`, no shift is applied. scale_identity_multiplier: floating point rank 0 `Tensor` representing a scaling done to the identity matrix. When `scale_identity_multiplier = scale_diag = scale_tril = None` then `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added to `scale`. scale_diag: Floating-point `Tensor` representing the diagonal matrix. `scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal matrix. When `None` no diagonal term is added to `scale`. scale_tril: Floating-point `Tensor` representing the diagonal matrix. `scale_diag` has shape [N1, N2, ... k, k], which represents a k x k lower triangular matrix. When `None` no `scale_tril` term is added to `scale`. The upper triangular elements above the diagonal are ignored. scale_perturb_factor: Floating-point `Tensor` representing factor matrix with last two dimensions of shape `(k, r)`. When `None`, no rank-r update is added to `scale`. scale_perturb_diag: Floating-point `Tensor` representing the diagonal matrix. `scale_perturb_diag` has shape [N1, N2, ... r], which represents an `r x r` diagonal matrix. When `None` low rank updates will take the form `scale_perturb_factor * scale_perturb_factor.T`. event_ndims: Scalar `int` `Tensor` indicating the number of dimensions associated with a particular draw from the distribution. Must be 0 or 1. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. Raises: ValueError: if `perturb_diag` is specified but not `perturb_factor`. TypeError: if `shift` has different `dtype` from `scale` arguments. """ self._graph_parents = [] self._name = name self._validate_args = validate_args # Ambiguous definition of low rank update. if scale_perturb_diag is not None and scale_perturb_factor is None: raise ValueError("When scale_perturb_diag is specified, " "scale_perturb_factor must be specified.") # Special case, only handling a scaled identity matrix. We don't know its # dimensions, so this is special cased. # We don't check identity_multiplier, since below we set it to 1. if all # other scale args are None. self._is_only_identity_multiplier = (scale_tril is None and scale_diag is None and scale_perturb_factor is None) with self._name_scope("init", values=[ shift, scale_identity_multiplier, scale_diag, scale_tril, scale_perturb_diag, scale_perturb_factor]): event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims") event_ndims_const = tensor_util.constant_value(event_ndims) if event_ndims_const is not None and event_ndims_const not in (0, 1): raise ValueError("event_ndims(%s) was not 0 or 1" % event_ndims_const) else: if validate_args: # Shape tool will catch if event_ndims is negative. event_ndims = control_flow_ops.with_dependencies( [check_ops.assert_less( event_ndims, 2, message="event_ndims must be 0 or 1")], event_ndims) # In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`. dtype = dtypes.float32 if shift is not None: shift = ops.convert_to_tensor(shift, name="shift") dtype = shift.dtype.base_dtype self._shift = shift # When no args are specified, pretend the scale matrix is the identity # matrix. 
if (self._is_only_identity_multiplier and scale_identity_multiplier is None): scale_identity_multiplier = ops.convert_to_tensor(1., dtype=dtype) # self._create_scale_operator returns a LinearOperator in all cases # except if self._is_only_identity_multiplier; in which case it # returns a scalar Tensor. scale = self._create_scale_operator( identity_multiplier=scale_identity_multiplier, diag=scale_diag, tril=scale_tril, perturb_diag=scale_perturb_diag, perturb_factor=scale_perturb_factor, shift=shift, validate_args=validate_args) if scale.dtype is not None: dtype = scale.dtype.base_dtype if scale is not None and not self._is_only_identity_multiplier: if (shift is not None and shift.dtype.base_dtype != scale.dtype.base_dtype): raise TypeError( "shift.dtype({}) is incompatible with scale.dtype({}).".format( shift.dtype, scale.dtype)) if scale.tensor_rank is not None: batch_ndims = scale.tensor_rank - 2 else: batch_ndims = scale.tensor_rank_tensor() - 2 else: # We won't need shape inference when scale is None or when scale is a # scalar. batch_ndims = 0 self._scale = scale self._shaper = _DistributionShape( batch_ndims=batch_ndims, event_ndims=event_ndims, validate_args=validate_args) super(Affine, self).__init__( event_ndims=event_ndims, graph_parents=( [event_ndims] + [self._scale] if tensor_util.is_tensor(self._scale) else self._scale.graph_parents + [self._shift] if self._shift is not None else []), is_constant_jacobian=True, dtype=dtype, validate_args=validate_args, name=name) def _create_scale_operator(self, identity_multiplier, diag, tril, perturb_diag, perturb_factor, shift, validate_args): """Construct `scale` from various components. Args: identity_multiplier: floating point rank 0 `Tensor` representing a scaling done to the identity matrix. diag: Floating-point `Tensor` representing the diagonal matrix. `scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal matrix. tril: Floating-point `Tensor` representing the diagonal matrix. `scale_tril` has shape [N1, N2, ... k], which represents a k x k lower triangular matrix. perturb_diag: Floating-point `Tensor` representing the diagonal matrix of the low rank update. perturb_factor: Floating-point `Tensor` representing factor matrix. shift: Floating-point `Tensor` representing `shift in `scale @ X + shift`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Returns: scale. In the case of scaling by a constant, scale is a floating point `Tensor`. Otherwise, scale is a `LinearOperator`. Raises: ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`. """ identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier") diag = _as_tensor(diag, "diag") tril = _as_tensor(tril, "tril") perturb_diag = _as_tensor(perturb_diag, "perturb_diag") perturb_factor = _as_tensor(perturb_factor, "perturb_factor") # If possible, use the low rank update to infer the shape of # the identity matrix, when scale represents a scaled identity matrix # with a low rank update. 
shape_hint = None if perturb_factor is not None: shape_hint = distribution_util.dimension_size(perturb_factor, axis=-2) if self._is_only_identity_multiplier: if validate_args: return control_flow_ops.with_dependencies( [check_ops.assert_none_equal( identity_multiplier, array_ops.zeros([], identity_multiplier.dtype), ["identity_multiplier should be non-zero."])], identity_multiplier) return identity_multiplier scale = distribution_util.make_tril_scale( loc=shift, scale_tril=tril, scale_diag=diag, scale_identity_multiplier=identity_multiplier, validate_args=validate_args, assert_positive=False, shape_hint=shape_hint) if perturb_factor is not None: return linalg.LinearOperatorUDVHUpdate( scale, u=perturb_factor, diag_update=perturb_diag, is_diag_update_positive=perturb_diag is None, is_non_singular=True, # Implied by is_positive_definite=True. is_self_adjoint=True, is_positive_definite=True, is_square=True) return scale @property def shift(self): """The `shift` `Tensor` in `Y = scale @ X + shift`.""" return self._shift @property def scale(self): """The `scale` `LinearOperator` in `Y = scale @ X + shift`.""" return self._scale def _forward(self, x): y = x if self._is_only_identity_multiplier: y *= self._scale if self.shift is not None: return y + self.shift return y y, sample_shape = self._shaper.make_batch_of_event_sample_matrices( y, expand_batch_dim=False) with ops.control_dependencies(self._maybe_check_scale() if self.validate_args else []): y = self.scale.matmul(y) y = self._shaper.undo_make_batch_of_event_sample_matrices( y, sample_shape, expand_batch_dim=False) if self.shift is not None: y += self.shift return y def _inverse(self, y): x = y if self.shift is not None: x -= self.shift if self._is_only_identity_multiplier: return x / self._scale x, sample_shape = self._shaper.make_batch_of_event_sample_matrices( x, expand_batch_dim=False) # Solve fails if the op is singular so we may safely skip this assertion. x = self.scale.solve(x) x = self._shaper.undo_make_batch_of_event_sample_matrices( x, sample_shape, expand_batch_dim=False) return x def _inverse_log_det_jacobian(self, y): return -self._forward_log_det_jacobian(y) def _forward_log_det_jacobian(self, x): if self._is_only_identity_multiplier: # We don't pad in this case and instead let the fldj be applied # via broadcast. d = math_ops.cast(array_ops.shape(x)[-1], dtype=self._scale.dtype) one = ops.convert_to_tensor(1., self._scale.dtype) return math_ops.log(math_ops.abs(self._scale)) * array_ops.where( math_ops.equal(self._shaper.event_ndims, 0), one, d) return self.scale.log_abs_determinant() def _maybe_check_scale(self): try: return [self.scale.assert_non_singular()] except NotImplementedError: pass return []
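# Illustrative sketch (not part of the TensorFlow API): the `scale` composition
# described in the class docstring, written out with plain numpy for concrete
# 3x3 inputs, i.e. scale = c * I + diag(D1) + tril(L) + V @ diag(D2) @ V.T.
# The helper below is hypothetical and only illustrates the algebra; the real
# bijector builds `scale` as a (possibly matrix-free) LinearOperator.


def _affine_scale_composition_demo():
  """Return (y, x_recovered) for a dense 3x3 version of the docstring formula."""
  import numpy as np

  c = 2.                                        # scale_identity_multiplier
  d1 = np.array([-1., 2., 1.])                  # scale_diag
  tril = np.tril(np.ones((3, 3)))               # a lower-triangular (scale_tril-like) term
  v = np.array([[1., 0.], [0., 1.], [1., 1.]])  # scale_perturb_factor (k=3, r=2)
  d2 = np.array([2., 1.])                       # scale_perturb_diag

  scale = (c * np.eye(3) + np.diag(d1) + tril
           + np.dot(v, np.dot(np.diag(d2), v.T)))

  shift = np.array([1., 2., 3.])
  x = np.array([1., -2., 0.5])
  y = np.dot(scale, x) + shift                       # forward: Y = scale @ X + shift
  x_recovered = np.linalg.solve(scale, y - shift)    # inverse, cf. _inverse() above
  return y, x_recovered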
#!/usr/bin/env python import datetime import random import numpy as np import math import copy from scipy import ndimage import networkx as nx import statsmodels.api as sm from .db import DB from .graphics import Graphics from .bee import Bee from .logdata import LogData class Experiment: def __init__(self, hive_id): self.hive_id = hive_id self.output_dir = '../results/' self.num_x_cells = 40 self.num_y_cells = 20 self.x_bins = 3840/self.num_x_cells self.y_bins = 2160/self.num_y_cells self.frames_per_window = 25 self.min_angle_speed = 30 self.tag_confidence_percentage = 0.8 self.min_tracked_for_classification = 100 self.min_time_tracked = 25 * 5 self.plotnum = 0 self.logger = LogData() hour_blocks_in_experiment = self.retrieve_hour_blocks_in_experiment(hive_id) day_hour_bins, night_hour_bins = self.group_hours_by_night_day(hour_blocks_in_experiment) self.day_grouped_bees = self.retrieve_bees_in_time_period(day_hour_bins) self.night_grouped_bees = self.retrieve_bees_in_time_period(night_hour_bins) @staticmethod def calc_distance(x1, y1, x2, y2): x_dist = (x2 - x1) y_dist = (y2 - y1) return math.sqrt(x_dist * x_dist + y_dist * y_dist) @staticmethod def absolute_angle_degree(x1, y1, x2, y2): deltax = x2 - x1 deltay = y2 - y1 rad = math.atan2(deltay, deltax) angle = math.degrees(rad) if angle < 0: angle += 360 return angle def retrieve_hour_blocks_in_experiment(self, hive_id): db = DB() query_statement = db.query_string(table='bees', cols=['HourBin'], distinct=True, where='HiveID={}'.format(hive_id)) hours_query_result = db.query(query_statement) db.close() experiment_hours = [] for hour_row in hours_query_result: experiment_hours.append(hour_row['HourBin']) experiment_hours.sort() return experiment_hours def group_hours_by_night_day(self, sorted_date_times): is_day = False night_hours = [] day_hours = [] current_hours_group = [] #print(sorted_date_times) for each_date_time in sorted_date_times: each_hour = each_date_time.time().hour #print(each_hour) if each_hour in [0, 1, 2, 3, 4, 5, 6, 19, 20, 21, 22, 23]: if is_day == True: day_hours.append(current_hours_group) is_day = False current_hours_group = [each_date_time] else: current_hours_group.append(each_date_time) else: if is_day == False: night_hours.append(current_hours_group) is_day = True current_hours_group = [each_date_time] else: current_hours_group.append(each_date_time) if is_day: day_hours.append(current_hours_group) else: night_hours.append(current_hours_group) return (day_hours, night_hours) def retrieve_bees_in_time_period(self, time_period_list_datetimes): group_bees = [] db = DB() #print(time_period_list_datetimes) for each_group_hours in time_period_list_datetimes: #print(each_group_hours, '\n') bees_in_time_group = [] query_statement = db.query_string(table='bees', cols=['BeeID', 'TagID', 'TagConfidence', 'LengthTracked'], group_condition='HourBin IN', group_list=[str(time) for time in each_group_hours]) #print(query_statement) hours_query_result = db.query(query_statement) for bee_row in hours_query_result:#[:100]: ##### change if bee_row['TagConfidence'] > self.tag_confidence_percentage: bee = Bee(bee_row['BeeID'], bee_row['TagID'], bee_row['LengthTracked']) bees_in_time_group.append(bee) else: bee = Bee(bee_row['BeeID'], 0, bee_row['LengthTracked']) bees_in_time_group.append(bee) group_bees.append(bees_in_time_group) db.close() return group_bees def calculate_day_night_metrics(self, day_num): print('Period', day_num) day_bees = self.day_grouped_bees[day_num] night_bees = self.night_grouped_bees[day_num] bee_id_dict, 
day_downsampled_bee_locations, night_downsampled_bee_locations = self.retrieve_process_bees(day_bees, night_bees) day_spread_all_tracked_individuals, day_spread_all_tracked_all_xy, day_spread_min_tracked_individuals, day_spread_min_tracked_all_xy = self.generate_heatmaps(day_bees, bee_id_dict, 'day_{}'.format(day_num)) night_spread_all_tracked_individuals, night_spread_all_tracked_all_xy, night_spread_min_tracked_individuals, night_spread_min_tracked_all_xy = self.generate_heatmaps(night_bees, bee_id_dict, 'night_{}'.format(day_num)) #print(day_spread_all_tracked_individuals, day_spread_all_tracked_all_xy, day_spread_min_tracked_individuals, day_spread_min_tracked_all_xy, '\n') day_mean_all_tracked_speeds, day_mean_min_tracked_speeds, day_median_all_tracked_speeds, day_median_min_tracked_speeds = self.generate_speeds(day_bees, bee_id_dict, 'day_{}'.format(day_num)) night_mean_all_tracked_speeds, night_mean_min_tracked_speeds, night_median_all_tracked_speeds, night_median_min_tracked_speeds = self.generate_speeds(night_bees, bee_id_dict, 'night_{}'.format(day_num)) day_percent_idle_all_tracked, day_percent_idle_min_tracked = self.idle_percentage(day_bees, bee_id_dict, 'day_{}'.format(day_num)) night_percent_idle_all_tracked, night_percent_idle_min_tracked = self.idle_percentage(night_bees, bee_id_dict, 'night_{}'.format(day_num)) day_list_node_degree, day_list_density, day_list_clustering = self.identify_relationships(day_downsampled_bee_locations, 'day_{}'.format(day_num)) night_list_node_degree, night_list_density, night_list_clustering = self.identify_relationships(night_downsampled_bee_locations, 'night_{}'.format(day_num)) #self.generate_angles(day_beeids, bee_id_dict, 'day_{}'.format(day_num)) #self.generate_angles(night_beeids, bee_id_dict, 'night_{}'.format(day_num)) self.logger.log_output(day_spread_all_tracked_individuals, day_spread_all_tracked_all_xy, day_spread_min_tracked_individuals, day_spread_min_tracked_all_xy, night_spread_all_tracked_individuals, night_spread_all_tracked_all_xy, night_spread_min_tracked_individuals, night_spread_min_tracked_all_xy, day_mean_all_tracked_speeds, day_mean_min_tracked_speeds, day_median_all_tracked_speeds, day_median_min_tracked_speeds, night_mean_all_tracked_speeds, night_mean_min_tracked_speeds, night_median_all_tracked_speeds, night_median_min_tracked_speeds, day_list_node_degree, day_list_density, day_list_clustering, night_list_node_degree, night_list_density, night_list_clustering, day_percent_idle_all_tracked, day_percent_idle_min_tracked, night_percent_idle_all_tracked, night_percent_idle_min_tracked, day_num, 'real') #test_circadian: True=shuffles day and night; False=shuffles tag types self.permutation_tests(day_bees, night_bees, day_downsampled_bee_locations, night_downsampled_bee_locations, bee_id_dict, day_num, 100, test_circadian=False) def permutation_tests(self, day_bees, night_bees, day_downsampled_bee_locations, night_downsampled_bee_locations, bee_id_d, day_num, num_iterations, test_circadian): combined_day_night_bees = day_bees + night_bees combined_locations_day_night_bees = day_downsampled_bee_locations + night_downsampled_bee_locations day_bees_grouped_by_tag = {0:[], 1:[], 2:[], 3:[]} night_bees_grouped_by_tag = {0:[], 1:[], 2:[], 3:[]} for each_bee in day_bees: bee_id = each_bee.bee_id bee_id = each_bee.bee_id bee = bee_id_d[bee_id] day_bees_grouped_by_tag[bee.tag_id].append(bee) for each_bee in night_bees: bee_id = each_bee.bee_id bee_id = each_bee.bee_id bee = bee_id_d[bee_id] 
night_bees_grouped_by_tag[bee.tag_id].append(bee) for i in range(num_iterations): bee_id_dict = copy.deepcopy(bee_id_d) if test_circadian: random.shuffle(combined_day_night_bees) shuffled_day_bees = combined_day_night_bees[:len(day_bees)] shuffled_night_bees = combined_day_night_bees[len(day_bees):] random.shuffle(combined_locations_day_night_bees) shuffled_day_locations_bees = combined_locations_day_night_bees[:len(day_downsampled_bee_locations)] shuffled_night_locations_bees = combined_locations_day_night_bees[len(day_downsampled_bee_locations):] else: random.shuffle(day_bees) random.shuffle(night_bees) shuffled_day_bees = day_bees shuffled_night_bees = night_bees shuffled_day_locations_bees = day_downsampled_bee_locations shuffled_night_locations_bees = night_downsampled_bee_locations num_bees_iterated = 0 for tag_type in day_bees_grouped_by_tag: for each_bee in day_bees[num_bees_iterated:len(day_bees_grouped_by_tag[tag_type])]: bee_id = each_bee.bee_id bee_id = each_bee.bee_id bee = bee_id_dict[bee_id] bee.tag_id = tag_type num_bees_iterated += len(day_bees_grouped_by_tag[tag_type]) num_bees_iterated = 0 for tag_type in night_bees_grouped_by_tag: for each_bee in night_bees[num_bees_iterated:len(night_bees_grouped_by_tag[tag_type])]: bee_id = each_bee.bee_id bee_id = each_bee.bee_id bee = bee_id_dict[bee_id] bee.tag_id = tag_type num_bees_iterated += len(night_bees_grouped_by_tag[tag_type]) day_spread_all_tracked_individuals, day_spread_all_tracked_all_xy, day_spread_min_tracked_individuals, day_spread_min_tracked_all_xy = self.generate_heatmaps(shuffled_day_bees, bee_id_dict, 'shuffled_spread_day_{}_{}'.format(day_num, i)) night_spread_all_tracked_individuals, night_spread_all_tracked_all_xy, night_spread_min_tracked_individuals, night_spread_min_tracked_all_xy = self.generate_heatmaps(shuffled_night_bees, bee_id_dict, 'shuffled_spread_night_{}_{}'.format(day_num, i)) day_mean_all_tracked_speeds, day_mean_min_tracked_speeds, day_median_all_tracked_speeds, day_median_min_tracked_speeds = self.generate_speeds(shuffled_day_bees, bee_id_dict, 'shuffled_speed_day_{}_{}'.format(day_num, i)) night_mean_all_tracked_speeds, night_mean_min_tracked_speeds, night_median_all_tracked_speeds, night_median_min_tracked_speeds = self.generate_speeds(shuffled_night_bees, bee_id_dict, 'shuffled_speed_night_{}_{}'.format(day_num, i)) day_percent_idle_all_tracked, day_percent_idle_min_tracked = self.idle_percentage(shuffled_day_bees, bee_id_dict, 'shuffled_percent_idle_day_{}_{}'.format(day_num, i)) night_percent_idle_all_tracked, night_percent_idle_min_tracked = self.idle_percentage(shuffled_night_bees, bee_id_dict, 'shuffled_percent_idle_night_{}_{}'.format(day_num, i)) day_list_node_degree, day_list_density, day_list_clustering = self.identify_relationships(shuffled_day_locations_bees, 'shuffled_network_day_{}_{}'.format(day_num, i)) night_list_node_degree, night_list_density, night_list_clustering = self.identify_relationships(shuffled_night_locations_bees, 'shuffled_network_night_{}_{}'.format(day_num, i)) self.logger.log_output(day_spread_all_tracked_individuals, day_spread_all_tracked_all_xy, day_spread_min_tracked_individuals, day_spread_min_tracked_all_xy, night_spread_all_tracked_individuals, night_spread_all_tracked_all_xy, night_spread_min_tracked_individuals, night_spread_min_tracked_all_xy, day_mean_all_tracked_speeds, day_mean_min_tracked_speeds, day_median_all_tracked_speeds, day_median_min_tracked_speeds, night_mean_all_tracked_speeds, night_mean_min_tracked_speeds, 
night_median_all_tracked_speeds, night_median_min_tracked_speeds, day_list_node_degree, day_list_density, day_list_clustering, night_list_node_degree, night_list_density, night_list_clustering, day_percent_idle_all_tracked, day_percent_idle_min_tracked, night_percent_idle_all_tracked, night_percent_idle_min_tracked, day_num, 'shuffled') if test_circadian: bootstrapped_day_bees = np.random.choice(day_bees, len(day_bees), replace=True) bootstrapped_night_bees = np.random.choice(night_bees, len(night_bees), replace=True) bootstrapped_day_locations_bees = np.random.choice(day_downsampled_bee_locations, len(day_downsampled_bee_locations), replace=True) bootstrapped_night_locations_bees = np.random.choice(night_downsampled_bee_locations, len(night_downsampled_bee_locations), replace=True) else: bootstrapped_day_bees = [] bootstrapped_night_bees = [] bootstrapped_day_locations_bees = day_downsampled_bee_locations bootstrapped_night_locations_bees = night_downsampled_bee_locations for tag_type in day_bees_grouped_by_tag: bootstrapped_tags = np.random.choice(day_bees_grouped_by_tag[tag_type], len(day_bees_grouped_by_tag[tag_type]), replace=True) bootstrapped_day_bees.extend(bootstrapped_tags) for tag_type in night_bees_grouped_by_tag: bootstrapped_tags = np.random.choice(day_bees_grouped_by_tag[tag_type], len(day_bees_grouped_by_tag[tag_type]), replace=True) bootstrapped_night_bees.extend(bootstrapped_tags) day_spread_all_tracked_individuals, day_spread_all_tracked_all_xy, day_spread_min_tracked_individuals, day_spread_min_tracked_all_xy = self.generate_heatmaps(bootstrapped_day_bees, bee_id_dict, 'bootstrapped_spread_day_{}_{}'.format(day_num, i)) night_spread_all_tracked_individuals, night_spread_all_tracked_all_xy, night_spread_min_tracked_individuals, night_spread_min_tracked_all_xy = self.generate_heatmaps(bootstrapped_night_bees, bee_id_dict, 'bootstrapped_spread_night_{}_{}'.format(day_num, i)) day_mean_all_tracked_speeds, day_mean_min_tracked_speeds, day_median_all_tracked_speeds, day_median_min_tracked_speeds = self.generate_speeds(bootstrapped_day_bees, bee_id_dict, 'bootstrapped_speed_day_{}_{}'.format(day_num, i)) night_mean_all_tracked_speeds, night_mean_min_tracked_speeds, night_median_all_tracked_speeds, night_median_min_tracked_speeds = self.generate_speeds(bootstrapped_night_bees, bee_id_dict, 'bootstrapped_speed_night_{}_{}'.format(day_num, i)) day_percent_idle_all_tracked, day_percent_idle_min_tracked = self.idle_percentage(bootstrapped_day_bees, bee_id_dict, 'bootstrapped_percent_idle_day_{}_{}'.format(day_num, i)) night_percent_idle_all_tracked, night_percent_idle_min_tracked = self.idle_percentage(bootstrapped_night_bees, bee_id_dict, 'bootstrapped_percent_idle_night_{}_{}'.format(day_num, i)) day_list_node_degree, day_list_density, day_list_clustering = self.identify_relationships(bootstrapped_day_locations_bees, 'bootstrapped_network_day_{}_{}'.format(day_num, i)) night_list_node_degree, night_list_density, night_list_clustering = self.identify_relationships(bootstrapped_night_locations_bees, 'bootstrapped_network_night_{}_{}'.format(day_num, i)) self.logger.log_output(day_spread_all_tracked_individuals, day_spread_all_tracked_all_xy, day_spread_min_tracked_individuals, day_spread_min_tracked_all_xy, night_spread_all_tracked_individuals, night_spread_all_tracked_all_xy, night_spread_min_tracked_individuals, night_spread_min_tracked_all_xy, day_mean_all_tracked_speeds, day_mean_min_tracked_speeds, day_median_all_tracked_speeds, day_median_min_tracked_speeds, 
night_mean_all_tracked_speeds, night_mean_min_tracked_speeds, night_median_all_tracked_speeds, night_median_min_tracked_speeds, day_list_node_degree, day_list_density, day_list_clustering, night_list_node_degree, night_list_density, night_list_clustering, day_percent_idle_all_tracked, day_percent_idle_min_tracked, night_percent_idle_all_tracked, night_percent_idle_min_tracked, day_num, 'bootstrapped') def retrieve_process_bees(self, day_bees, night_bees): day_bee_ids = [bee.bee_id for bee in day_bees] night_bee_ids = [bee.bee_id for bee in night_bees] bee_id_dict = {bee.bee_id: bee for bee in (day_bees + night_bees) } #list_bee_ids = list(bee_id_dict.keys()) def day_night_db_retrieve_analyse(list_bee_ids): time_period_locations_by_frame = {} db = DB() for i in range(0, len(list_bee_ids),200): subset_bee_ids = list_bee_ids[i:i+200] query_statement = db.query_string(table='bee_coords, paths', cols=['paths.BeeID', 'bee_coords.PathID', 'bee_coords.Frame', 'bee_coords.X', 'bee_coords.Y'], where='bee_coords.PathID = paths.PathID', group_condition='AND BeeID IN', group_list=subset_bee_ids, order='ORDER BY Frame ASC') coord_rows = db.query(query_statement) for row in coord_rows: bee_id = row['BeeID'] x = int(row['X'] / self.x_bins) y = int(row['Y'] / self.y_bins) yx_coord = (y, x) # for finding close interactions #bee_id_dict[bee_id].frame_xy[row['Frame']] = (row['X'], row['Y']) if row['Frame'] in time_period_locations_by_frame.keys(): time_period_locations_by_frame[row['Frame']].append((row['X'], row['Y'])) else: time_period_locations_by_frame[row['Frame']] = [(row['X'], row['Y'])] # heatmap location if yx_coord in bee_id_dict[bee_id].cells_visited: bee_id_dict[bee_id].cells_visited[yx_coord] += 1 else: bee_id_dict[bee_id].cells_visited[yx_coord] = 1 # calculate speeds and angles if bee_id_dict[bee_id].last_path_id == row['PathID']: bee_id_dict[bee_id].path_length += 1 if bee_id_dict[bee_id].path_length == self.frames_per_window: current_speed = Experiment.calc_distance(row['X'], row['Y'], bee_id_dict[bee_id].last_x, bee_id_dict[bee_id].last_y) bee_id_dict[bee_id].list_speeds.append(current_speed) bee_id_dict[bee_id].seconds_tracked += 1 if current_speed >= self.min_angle_speed: angle = Experiment.absolute_angle_degree(row['X'], row['Y'], bee_id_dict[bee_id].last_x, bee_id_dict[bee_id].last_y) bee_id_dict[bee_id].list_angles.append(angle) else: bee_id_dict[bee_id].seconds_idle += 1 bee_id_dict[bee_id].path_length = 1 bee_id_dict[bee_id].last_x = row['X'] bee_id_dict[bee_id].last_y = row['Y'] else: bee_id_dict[bee_id].last_path_id = row['PathID'] bee_id_dict[bee_id].path_length = 1 bee_id_dict[bee_id].last_x = row['X'] bee_id_dict[bee_id].last_y = row['Y'] db.close() downsampled_bee_locations = [] frames_list = list(time_period_locations_by_frame.keys()) frames_list.sort() for i, frame in enumerate(frames_list): if i % 1000 == 0: downsampled_bee_locations.append(time_period_locations_by_frame[frame]) return downsampled_bee_locations day_downsampled_bee_locations = day_night_db_retrieve_analyse(day_bee_ids) night_downsampled_bee_locations = day_night_db_retrieve_analyse(night_bee_ids) return (bee_id_dict, day_downsampled_bee_locations, night_downsampled_bee_locations) def identify_relationships(self, downsampled_bee_locations, plot_title): list_avg_node_degree, list_density, list_avg_clustering = ([],[],[]) for bees_in_frame in downsampled_bee_locations: bee_xy_list_nearby_bee_xy = {} for bee_xy in bees_in_frame: bee_xy_list_nearby_bee_xy[bee_xy] = [] for other_bees_xy in bees_in_frame: if bee_xy 
!= other_bees_xy and Experiment.calc_distance(bee_xy[0], bee_xy[1], other_bees_xy[0], other_bees_xy[1]) < 200: bee_xy_list_nearby_bee_xy[bee_xy].append(other_bees_xy) all_relations, direct_relations = self.group_bees_by_relationship(bee_xy_list_nearby_bee_xy) G=nx.Graph() for rel_group in direct_relations: G.add_nodes_from(rel_group) for bee_xy in rel_group: for other_bees_xy in rel_group: if bee_xy != other_bees_xy: G.add_edge(bee_xy, other_bees_xy) degree_list = list(G.degree().values()) avg_node_degree = sum(degree_list)/len(G.degree()) density = nx.density(G) avg_clustering = nx.average_clustering(G) list_avg_node_degree.append(avg_node_degree) list_density.append(density) list_avg_clustering.append(avg_clustering) return (list_avg_node_degree, list_density, list_avg_clustering) def group_bees_by_relationship(self, bee_xy_list_nearby_bee_xy): all_relationships_in_frame = [] direct_relationships_in_frame = [] for bee_xy in bee_xy_list_nearby_bee_xy.keys(): all_relationships = [bee_xy] direct_relationships = set() for other_bees_xy in bee_xy_list_nearby_bee_xy.keys(): if bee_xy != other_bees_xy: s1 = set([bee_xy]+bee_xy_list_nearby_bee_xy[bee_xy]) s2 = set([other_bees_xy]+bee_xy_list_nearby_bee_xy[other_bees_xy]) intersect_sets = s1.intersection(s2) if len(intersect_sets) > 0: all_relationships.append(other_bees_xy) direct_relationships = direct_relationships.union(intersect_sets) bee_xy_not_present_in_all_relationships_in_frame = True bee_xy_not_present_in_direct_relationships_in_frame = True for rel_group in all_relationships_in_frame: if bee_xy in rel_group: bee_xy_not_present_in_all_relationships_in_frame = False for rel_group in direct_relationships_in_frame: if bee_xy in rel_group: bee_xy_not_present_in_direct_relationships_in_frame = False if bee_xy_not_present_in_all_relationships_in_frame: all_relationships_in_frame.append(all_relationships) if bee_xy_not_present_in_direct_relationships_in_frame: if len(direct_relationships) == 0: direct_relationships = direct_relationships.union(set([bee_xy])) direct_relationships_in_frame.append(list(direct_relationships)) return(all_relationships_in_frame, direct_relationships_in_frame) def generate_heatmaps(self, list_bees, bee_id_dict, plot_title): all_tracked_individuals_heatmaps = {0: np.zeros((self.num_y_cells, self.num_x_cells)), 1: np.zeros((self.num_y_cells, self.num_x_cells)), 2: np.zeros((self.num_y_cells, self.num_x_cells)), 3: np.zeros((self.num_y_cells, self.num_x_cells)), 'All': np.zeros((self.num_y_cells, self.num_x_cells))} all_tracked_all_xy_points_heatmaps = copy.deepcopy(all_tracked_individuals_heatmaps) min_tracked_individuals_heatmaps = copy.deepcopy(all_tracked_individuals_heatmaps) min_tracked_all_xy_points_heatmaps = copy.deepcopy(all_tracked_individuals_heatmaps) #print([bee.bee_id for bee in list_bees], '\n') for each_bee in list_bees: bee_id = each_bee.bee_id bee = bee_id_dict[bee_id] for yx_coord in bee.cells_visited: y, x = yx_coord all_tracked_individuals_heatmaps[bee.tag_id][yx_coord] += 1 all_tracked_individuals_heatmaps['All'][yx_coord] += 1 all_tracked_all_xy_points_heatmaps[bee.tag_id][yx_coord] += bee.cells_visited[yx_coord] all_tracked_all_xy_points_heatmaps['All'][yx_coord] += bee.cells_visited[yx_coord] if bee.length_tracked > self.min_time_tracked: #print('if', bee.length_tracked) min_tracked_individuals_heatmaps[bee.tag_id][yx_coord] += 1 min_tracked_individuals_heatmaps['All'][yx_coord] += 1 min_tracked_all_xy_points_heatmaps[bee.tag_id][yx_coord] += bee.cells_visited[yx_coord] 
min_tracked_all_xy_points_heatmaps['All'][yx_coord] += bee.cells_visited[yx_coord] heatmap_dictionaries_list = [all_tracked_individuals_heatmaps, all_tracked_all_xy_points_heatmaps, min_tracked_individuals_heatmaps, min_tracked_all_xy_points_heatmaps] spread_heatmap_dicts = [{},{},{},{}] for tag_group in heatmap_dictionaries_list[0]: for i, hm_dictionary in enumerate(heatmap_dictionaries_list): norm_heatmap = heatmap_dictionaries_list[i][tag_group] / heatmap_dictionaries_list[i][tag_group].sum() centre = ndimage.measurements.center_of_mass(norm_heatmap) spread = 0 for y_c in range(0, norm_heatmap.shape[0]): for x_c in range(0, norm_heatmap.shape[1]): spread += Experiment.calc_distance(x_c, y_c, centre[1], centre[0]) * norm_heatmap[y_c, x_c] spread_heatmap_dicts[i][tag_group] = spread return spread_heatmap_dicts def generate_speeds(self, list_bees, bee_id_dict, plot_title): all_tracked_speeds = {0: [], 1: [], 2: [], 3: [], 'All': []} min_tracked_speeds = copy.deepcopy(all_tracked_speeds) for each_bee in list_bees: bee_id = each_bee.bee_id bee = bee_id_dict[bee_id] all_tracked_speeds[bee.tag_id].extend(bee.list_speeds) all_tracked_speeds['All'].extend(bee.list_speeds) if bee.length_tracked > self.min_time_tracked: min_tracked_speeds[bee.tag_id].extend(bee.list_speeds) min_tracked_speeds['All'].extend(bee.list_speeds) mean_all_tracked_speeds, mean_min_tracked_speeds, median_all_tracked_speeds, median_min_tracked_speeds = ({}, {}, {}, {}) for tag_id in all_tracked_speeds: mean_all_tracked_speeds[tag_id] = np.mean(all_tracked_speeds[tag_id]) mean_min_tracked_speeds[tag_id] = np.mean(min_tracked_speeds[tag_id]) median_all_tracked_speeds[tag_id] = np.median(all_tracked_speeds[tag_id]) median_min_tracked_speeds[tag_id] = np.median(min_tracked_speeds[tag_id]) return [mean_all_tracked_speeds, mean_min_tracked_speeds, median_all_tracked_speeds, median_min_tracked_speeds] def idle_percentage(self, list_bees, bee_id_dict, plot_title): # list, first element is incrementing time idle, second is incremented total time track all_tracked_idle_total = {0: [0,0], 1: [0,0], 2: [0,0], 3: [0,0], 'All': [0,0]} min_tracked_idle_total = copy.deepcopy(all_tracked_idle_total) for each_bee in list_bees: bee_id = each_bee.bee_id bee = bee_id_dict[bee_id] all_tracked_idle_total[bee.tag_id][0] += (bee.seconds_idle) all_tracked_idle_total[bee.tag_id][1] += (bee.seconds_tracked) all_tracked_idle_total['All'][0] += (bee.seconds_idle) all_tracked_idle_total['All'][1] += (bee.seconds_tracked) if bee.length_tracked > self.min_time_tracked: min_tracked_idle_total[bee.tag_id][0] += (bee.seconds_idle) min_tracked_idle_total[bee.tag_id][1] += (bee.seconds_tracked) min_tracked_idle_total['All'][0] += (bee.seconds_idle) min_tracked_idle_total['All'][1] += (bee.seconds_tracked) percent_idle_all_tracked, percent_idle_min_tracked = ({}, {}) for tag_id in all_tracked_idle_total: if all_tracked_idle_total[tag_id][1] != 0: percent_idle_all_tracked[tag_id] = all_tracked_idle_total[tag_id][0] / all_tracked_idle_total[tag_id][1] else: percent_idle_all_tracked[tag_id] = 0 if min_tracked_idle_total[tag_id][1] != 0: percent_idle_min_tracked[tag_id] = min_tracked_idle_total[tag_id][0] / min_tracked_idle_total[tag_id][1] else: percent_idle_min_tracked[tag_id] = 0 return [percent_idle_all_tracked, percent_idle_min_tracked] def generate_angles(self, list_bees, bee_id_dict, plot_title): all_tracked_angles = {0: np.zeros(360 / 20), 1: np.zeros(360 / 20), 2: np.zeros(360 / 20), 3: np.zeros(360 / 20), 'All': np.zeros(360 / 20)} min_tracked_angles = 
copy.deepcopy(all_tracked_angles)  # copy the angles dict (all_tracked_speeds is not in scope here)
        for each_bee in list_bees:
            bee_id = each_bee.bee_id
            bee = bee_id_dict[bee_id]
            if len(bee.list_angles) > 0:
                angles_hist = Graphics.create_angles_hist(bee.list_angles)
                all_tracked_angles[bee.tag_id] += angles_hist
                all_tracked_angles['All'] += angles_hist
                # use total time tracked, consistent with the other per-bee metrics
                if bee.length_tracked > self.min_time_tracked:
                    min_tracked_angles[bee.tag_id] += angles_hist
                    min_tracked_angles['All'] += angles_hist
        return None


def main():
    pass


if __name__ == "__main__":
    main()
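# Illustrative sketch (not used by Experiment): the "spread" statistic computed
# in generate_heatmaps(), i.e. normalize a heatmap, locate its centre of mass,
# then take the occupancy-weighted mean distance of every cell from that
# centre. The function and toy array below are hypothetical and touch neither
# the database nor the Bee objects.


def _heatmap_spread_demo():
    toy = np.zeros((4, 4))
    toy[0, 0] = toy[3, 3] = 1.0          # occupancy split between opposite corners
    norm_heatmap = toy / toy.sum()
    centre = ndimage.measurements.center_of_mass(norm_heatmap)
    spread = 0.0
    for y_c in range(norm_heatmap.shape[0]):
        for x_c in range(norm_heatmap.shape[1]):
            spread += (Experiment.calc_distance(x_c, y_c, centre[1], centre[0])
                       * norm_heatmap[y_c, x_c])
    return spread                        # larger values = more spread-out occupancy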
from __future__ import (absolute_import, division, print_function) from gridded.pysgrid.sgrid import SGrid from gridded.pyugrid.ugrid import UGrid import numpy as np from gridded.utilities import get_dataset, gen_celltree_mask_from_center_mask from six import string_types class GridBase(object): ''' Base object for grids to share common behavior ''' _def_count = 0 def __init__(self, filename=None, *args, **kwargs): """ Init common to all Grid types. This initializer will take all the kwargs of both pyugrid.UGrid and pysgrid.SGrid. See their documentation for details :param filename: Name of the file this grid was constructed from, if available. """ if 'name' in kwargs: self.name = kwargs['name'] else: self.name = self.__class__.__name__ + '_' + str(type(self)._def_count) self.filename = filename type(self)._def_count += 1 super(GridBase, self).__init__(**kwargs) @classmethod def from_netCDF(cls, *args, **kwargs): kwargs['grid_type'] = cls return Grid.from_netCDF(*args, **kwargs) @classmethod def _find_required_grid_attrs(cls, filename, dataset=None, grid_topology=None,): ''' This function is the top level 'search for attributes' function. If there are any common attributes to all potential grid types, they will be sought here. This function returns a dict, which maps an attribute name to a netCDF4 Variable or numpy array object extracted from the dataset. When called from Grid_U or Grid_S, this function should provide all the kwargs needed to create a valid instance. ''' gf_vars = dataset.variables if dataset is not None else get_dataset(filename).variables gf_vars = dict([(k.lower(), v) for k, v in gf_vars.items()]) init_args = {} gt = {} init_args['filename'] = filename node_attrs = ['node_lon', 'node_lat'] node_coord_names = [['node_lon', 'node_lat'], ['lon', 'lat'], ['lon_psi', 'lat_psi'], ['longitude', 'latitude']] composite_node_names = ['nodes', 'node'] if grid_topology is None: for n1, n2 in node_coord_names: if n1 in gf_vars and n2 in gf_vars: init_args[node_attrs[0]] = gf_vars[n1][:] init_args[node_attrs[1]] = gf_vars[n2][:] gt[node_attrs[0]] = n1 gt[node_attrs[1]] = n2 break if node_attrs[0] not in init_args: for n in composite_node_names: if n in gf_vars: v = gf_vars[n][:].reshape(-1, 2) init_args[node_attrs[0]] = v[:, 0] init_args[node_attrs[1]] = v[:, 1] gt['node_coordinates'] = n break if node_attrs[0] not in init_args: raise ValueError('Unable to find node coordinates.') else: for n, v in grid_topology.items(): if n in node_attrs: init_args[n] = gf_vars[v][:] if n in composite_node_names: v = gf_vars[n][:].reshape(-1, 2) init_args[node_attrs[0]] = v[:, 0] init_args[node_attrs[1]] = v[:, 1] return init_args, gt @property def shape(self): return self.node_lon.shape def __eq__(self, o): if self is o: return True for n in ('nodes', 'faces'): if (hasattr(self, n) and hasattr(o, n) and getattr(self, n) is not None and getattr(o, n) is not None): s = getattr(self, n) s2 = getattr(o, n) if s.shape != s2.shape or np.any(s != s2): return False return True def _write_grid_to_file(self, pth): self.save_as_netcdf(pth) def import_variable(self, variable, location='node'): """ Takes a Variable or VectorVariable and interpolates the data onto this grid. You may pass a location ('nodes', 'faces', 'edge1', 'edge2) and the variable will be interpolated there if possible If no location is passed, the variable will be interpolated to the nodes of this grid. If the Variable's grid and this grid are the same, this function will return the Variable unchanged. 
If this grid covers area that the source grid does not, all values in this area will be masked. If regridding from cell centers to the nodes, The values of any border point not within will be equal to the value at the center of the border cell. """ raise NotImplementedError("GridBase cannot interpolate variables to itself") class Grid_U(GridBase, UGrid): @classmethod def _find_required_grid_attrs(cls, filename, dataset=None, grid_topology=None): gf_vars = dataset.variables if dataset is not None else get_dataset(filename).variables gf_vars = dict([(k.lower(), v) for k, v in gf_vars.items()]) # Get superset attributes init_args, gt = super(Grid_U, cls)._find_required_grid_attrs(filename=filename, dataset=dataset, grid_topology=grid_topology) face_attrs = ['faces'] face_var_names = ['faces', 'tris', 'nv', 'ele'] if grid_topology is None: for n in face_var_names: if n in gf_vars: init_args[face_attrs[0]] = gf_vars[n][:] gt[face_attrs[0]] = n break if face_attrs[0] not in init_args: raise ValueError('Unable to find face connectivity array.') else: for n, v in grid_topology.items(): if n in face_attrs: init_args[n] = gf_vars[v][:] break if init_args['faces'].shape[0] == 3: init_args['faces'] = np.ascontiguousarray(np.array(init_args['faces']).T - 1) return init_args, gt @classmethod def gen_from_quads(cls, nodes): if not len(nodes.shape) == 3: raise ValueError('Nodes of a quad grid must be 2 dimensional') lin_nodes = None if isinstance(nodes, np.ma.MaskedArray): lin_nodes = nodes.reshape(-1, 2)[nodes] class Grid_S(GridBase, SGrid): @classmethod def _find_required_grid_attrs(cls, filename, dataset=None, grid_topology=None): # THESE ARE ACTUALLY ALL OPTIONAL. This should be migrated when optional attributes # are dealt with # Get superset attributes gf_vars = dataset.variables if dataset is not None else get_dataset(filename).variables gf_vars = dict([(k.lower(), v) for k, v in gf_vars.items()]) init_args, gt = super(Grid_S, cls)._find_required_grid_attrs(filename, dataset=dataset, grid_topology=grid_topology) center_attrs = ['center_lon', 'center_lat'] edge1_attrs = ['edge1_lon', 'edge1_lat'] edge2_attrs = ['edge2_lon', 'edge2_lat'] node_mask = 'node_mask' center_mask = 'center_mask' edge1_mask = 'edge1_mask' edge2_mask = 'edge2_mask' center_coord_names = [['center_lon', 'center_lat'], ['lon_rho', 'lat_rho'], ['lonc', 'latc']] edge1_coord_names = [['edge1_lon', 'edge1_lat'], ['lon_u', 'lat_u']] edge2_coord_names = [['edge2_lon', 'edge2_lat'], ['lon_v', 'lat_v']] node_mask_names = ['mask_psi'] center_mask_names = ['mask_rho'] edge1_mask_names = ['mask_u'] edge2_mask_names = ['mask_v'] if grid_topology is None: for attr, names, maskattr, maskname in (zip((center_attrs, edge1_attrs, edge2_attrs), (center_coord_names, edge1_coord_names, edge2_coord_names), (center_mask, edge1_mask, edge2_mask), (center_mask_names, edge1_mask_names, edge2_mask_names))): for n1, n2 in names: if n1 in gf_vars and n2 in gf_vars: mask = False #for n in maskname: #if n in gf_vars: #mask = gen_mask(gf_vars[n]) a1 = gf_vars[n1][:] a2 = gf_vars[n2][:] init_args[attr[0]] = a1 init_args[attr[1]] = a2 if maskname[0] in gf_vars: init_args[maskattr] = gf_vars[maskname[0]] gt[maskattr] = maskname[0] gt[attr[0]] = n1 gt[attr[1]] = n2 break if 'node_lon' in init_args and 'node_lat' in init_args: mask = False for name in node_mask_names: if name in gf_vars: init_args[node_mask] = gf_vars[name] gt[node_mask] = name else: for n, v in grid_topology.items(): if n in center_attrs + edge1_attrs + edge2_attrs and v in gf_vars: init_args[n] 
= gf_vars[v][:] return init_args, gt class Grid_R(GridBase): def __init__(self, node_lon=None, node_lat=None, grid_topology=None, dimensions=None, node_dimensions=None, node_coordinates=None, *args, **kwargs): self.node_lon = node_lon self.node_lat = node_lat self.grid_topology = grid_topology self.dimensions = dimensions self.node_dimensions = node_dimensions self.node_coordinates = node_coordinates super(Grid_R, self).__init__(*args,**kwargs) @classmethod def _find_required_grid_attrs(cls, filename, dataset=None, grid_topology=None): # THESE ARE ACTUALLY ALL OPTIONAL. This should be migrated when optional attributes # are dealt with # Get superset attributes gf_vars = dataset.variables if dataset is not None else get_dataset(filename).variables gf_vars = dict([(k.lower(), v) for k, v in gf_vars.items()] ) init_args, gt = super(Grid_R, cls)._find_required_grid_attrs(filename, dataset=dataset, grid_topology=grid_topology) # Grid_R only needs node_lon and node_lat. However, they must be a specific shape (1D) node_lon = init_args['node_lon'] node_lat = init_args['node_lat'] if len(node_lon.shape) != 1: raise ValueError('Too many dimensions in node_lon. Must be 1D, was {0}D'.format(len(node_lon.shape))) if len(node_lat.shape) != 1: raise ValueError('Too many dimensions in node_lat. Must be 1D, was {0}D'.format(len(node_lat.shape))) return init_args, gt @property def nodes(self): return np.stack((np.meshgrid(self.node_lon, self.node_lat)), axis=-1) @property def center_lon(self): return (self.node_lon[0:-1] + self.node_lon[1:]) / 2 @property def center_lat(self): return (self.node_lat[0:-1] + self.node_lat[1:]) / 2 @property def centers(self): return np.stack((np.meshgrid(self.center_lon, self.center_lat)), axis=-1) def locate_faces(self, points): """ Returns the node grid indices, one per point. Points that are not on the node grid will have an index of -1 If a single point is passed in, a single index will be returned. If a sequence of points is passed in an array of indexes will be returned. :param points: The points that you want to locate -- (lon, lat). If the shape of point is 1D, function will return a scalar index. If it is 2D, it will return a 1D array of indices. :type points: array-like containing one or more points: shape (2,) for one point, shape (N, 2) for more than one point. 
""" points = np.asarray(points, dtype=np.float64) just_one = (points.ndim == 1) points = points.reshape(-1, 2) lons = points[:, 0] lats = points[:, 1] lon_idxs = np.digitize(lons, self.node_lon) - 1 for i, n in enumerate(lon_idxs): if n == len(self.node_lon) - 1: lon_idxs[i] = -1 # if n == 0 and not lons[i] < self.node_lon.max() and not lons[i] >= self.node_lon.min(): # lon_idxs[i] = -1 lat_idxs = np.digitize(lats, self.node_lat) - 1 for i, n in enumerate(lat_idxs): if n == len(self.node_lat) -1: lat_idxs[i] = -1 # if n == 0 and not lats[i] < self.node_lat.max() and not lats[i] >= self.node_lat.min(): # lat_idxs[i] = -1 idxs = np.column_stack((lon_idxs, lat_idxs)) idxs[:,0] = np.where(idxs[:,1] == -1, -1, idxs[:,0]) idxs[:,1] = np.where(idxs[:,0] == -1, -1, idxs[:,1]) if just_one: res = idxs[0] return res else: return idxs def interpolate_var_to_points(self, points, variable, method='linear', indices=None, slices=None, mask=None, **kwargs): try: from scipy.interpolate import RegularGridInterpolator except ImportError: raise ImportError("The scipy package is required to use " "Grid_R.interpolate_var_to_points\n" " -- interpolating a regular grid") points = np.asarray(points, dtype=np.float64) just_one = (points.ndim == 1) points = points.reshape(-1, 2) if slices is not None: variable = variable[slices] if np.ma.isMA(variable): variable = variable.filled(0) #eventually should use Variable fill value x = self.node_lon if variable.shape[0] == len(self.node_lon) else self.node_lat y = self.node_lat if x is self.node_lon else self.node_lon interp_func = RegularGridInterpolator((x, y), variable, method=method, bounds_error=False, fill_value=0) if x is self.node_lon: vals = interp_func(points, method=method) else: vals = interp_func(points[:, ::-1], method=method) if just_one: return vals[0] else: return vals def infer_location(self, variable): """ fixme: should first look for "location" attribute. But now we are checking variable dimensions to which part of the grid it is on. """ shape = None node_shape = self.nodes.shape[0:-1] # centers_shape = self.centers.shape[0:-1] try: shape = np.array(variable.shape) except: return None # Variable has no shape attribute! if len(variable.shape) < 2: return None difference = (shape[-2:] - node_shape).tolist() if (difference == [1, 1] or difference == [-1, -1]) and self.center_lon is not None: return 'center' elif difference == [1, 0] and self.edge1_lon is not None: return 'edge1' elif difference == [0, 1] and self.edge2_lon is not None: return 'edge2' elif difference == [0, 0] and self.node_lon is not None: return 'node' else: return None class Grid(object): ''' Factory class that generates grid objects. Also handles common loading and parsing operations ''' def __init__(self): ''' Init common to all Grid types. This constructor will take all the kwargs of both pyugrid.UGrid and pysgrid.SGrid. See their documentation for details :param filename: Name of the file this grid was constructed from, if available. ''' raise NotImplementedError("Grid is not meant to be instantiated. " "Please use the from_netCDF function. " "or initialize the type of grid you want directly") @staticmethod def _load_grid(filename, grid_type, dataset=None): ''' Redirect to grid-specific loading routine. 
''' if issubclass(grid_type, UGrid): return grid_type.from_ncfile(filename) elif issubclass(grid_type, SGrid): ds = get_dataset(filename, dataset) g = grid_type.load_grid(ds) g.filename = filename return g else: return grid_type.from_ncfile(filename) pass @staticmethod def from_netCDF(filename=None, dataset=None, grid_type=None, grid_topology=None, _default_types=(('ugrid', Grid_U), ('sgrid', Grid_S), ('rgrid', Grid_R)), *args, **kwargs): ''' :param filename: File containing a grid :param dataset: Takes precedence over filename, if provided. :param grid_type: Must be provided if Dataset does not have a 'grid_type' attribute, or valid topology variable :param grid_topology: A dictionary mapping of grid attribute to variable name. Takes precedence over discovered attributes :param kwargs: All kwargs to SGrid, UGrid, or RGrid are valid, and take precedence over all. :returns: Instance of Grid_U, Grid_S, or Grid_R ''' gf = dataset if filename is None else get_dataset(filename, dataset) if gf is None: raise ValueError('No filename or dataset provided') cls = grid_type if (grid_type is None or isinstance(grid_type, string_types) or not issubclass(grid_type, GridBase)): cls = Grid._get_grid_type(gf, grid_type, grid_topology, _default_types) # if grid_topology is passed in, don't look for the variable if not grid_topology: compliant = Grid._find_topology_var(None, gf) else: compliant = None if compliant is not None: c = Grid._load_grid(filename, cls, dataset) c.grid_topology = compliant.__dict__ else: init_args, gt = cls._find_required_grid_attrs(filename, dataset=dataset, grid_topology=grid_topology) c = cls(**init_args) c.grid_topology = gt return c @staticmethod def _get_grid_type(dataset, grid_type=None, grid_topology=None, _default_types=(('ugrid', Grid_U), ('sgrid', Grid_S), ('rgrid', Grid_R))): # fixme: this logic should probably be defered to # the grid type code -- that is, ask each grid # type if this dataset is its type. # # It also should be refactored to start with the standards # and maybe havev a pedantic mode where it won't load non-standard # files if _default_types is None: _default_types = dict() else: _default_types = dict(_default_types) Grid_U = _default_types.get('ugrid', None) Grid_S = _default_types.get('sgrid', None) Grid_R = _default_types.get('rgrid', None) sgrid_names = ['sgrid', 'pygrid_s', 'staggered', 'curvilinear', 'roms'] ugrid_names = ['ugrid', 'pygrid_u', 'triangular', 'unstructured'] rgrid_names = ['rgrid', 'regular', 'rectangular', 'rectilinear'] if grid_type is not None: if grid_type.lower() in sgrid_names: return Grid_S elif grid_type.lower() in ugrid_names: return Grid_U elif grid_type.lower() in rgrid_names: return Grid_R else: raise ValueError('Specified grid_type not recognized/supported') if grid_topology is not None: if ('faces' in grid_topology.keys() or grid_topology.get('grid_type', 'notype').lower() in ugrid_names): return Grid_U elif grid_topology.get('grid_type', 'notype').lower() in rgrid_names: return Grid_R else: return Grid_S else: # no topology, so search dataset for grid_type variable if (hasattr(dataset, 'grid_type') and dataset.grid_type in sgrid_names + ugrid_names): if dataset.grid_type.lower() in ugrid_names: return Grid_U elif dataset.grid_type.lower() in rgrid_names: return Grid_R else: return Grid_S else: # TODO: Determine an effective decision tree for picking if # a topology variable is present # no grid type explicitly specified. is a topology variable present? 
topology = Grid._find_topology_var(None, dataset=dataset) if topology is not None: if (hasattr(topology, 'node_coordinates') and not hasattr(topology, 'node_dimensions')): return Grid_U else: return Grid_S else: # no topology variable either, so generate and try again. # if no defaults are found, _gen_topology will raise an error try: u_init_args, u_gf_vars = Grid_U._find_required_grid_attrs(None, dataset) return Grid_U except ValueError: try: r_init_args, r_gf_vars = Grid_R._find_required_grid_attrs(None, dataset) return Grid_R except ValueError: try: s_init_args, s_gf_vars = Grid_S._find_required_grid_attrs(None, dataset) except ValueError: raise ValueError("Can not figure out what type of grid this is. " "Try specifying the grid_topology attributes " "or specifying the grid type") return Grid_S @staticmethod def _find_topology_var(filename, dataset=None): gf = get_dataset(filename, dataset) gts = [] for k, v in gf.variables.items(): if hasattr(v, 'cf_role') and 'topology' in v.cf_role: gts.append(v) # gts = gf.get_variables_by_attributes(cf_role=lambda t: t is not None and 'topology' in t) if len(gts) != 0: return gts[0] else: return None
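# --- Illustrative usage sketch (not part of the original module) ---
# Grid.from_netCDF is the intended entry point: it inspects a file (or an open
# netCDF4 Dataset) and hands back a Grid_U, Grid_S, or Grid_R instance.  The
# snippet below builds a Grid_R directly from arbitrary 1-D axes so it can run
# without a file; the netCDF file name in the comment is hypothetical.
if __name__ == '__main__':
    lon = np.linspace(-130.0, -120.0, 11)
    lat = np.linspace(40.0, 50.0, 11)
    rgrid = Grid_R(node_lon=lon, node_lat=lat)

    # Index of the cell containing a point; off-grid points map to [-1, -1].
    print(rgrid.locate_faces((-125.3, 44.2)))
    print(rgrid.locate_faces([(-125.3, 44.2), (0.0, 0.0)]))

    # Loading from a file instead -- the grid type is auto-detected:
    # grid = Grid.from_netCDF('my_roms_output.nc')
    # print(type(grid), grid.shape)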
import datetime from flask import request, current_app, _request_ctx_stack from flask.ext.restful import reqparse, Resource, abort, marshal from flask.ext.restful.fields import Boolean, String, Integer, DateTime, Raw from flask.ext.sqlalchemy import BaseQuery, Pagination, get_state from flask.views import MethodViewType from sqlalchemy.dialects import postgres from sqlalchemy.orm import class_mapper from sqlalchemy.orm.exc import NoResultFound from flask_presst.signals import before_create_item, after_create_item, before_create_relationship, \ after_create_relationship, before_delete_relationship, after_delete_relationship, before_update_item, \ before_delete_item, after_delete_item, after_update_item, on_filter_read, on_filter_update, \ on_filter_delete from flask_presst.fields import _RelationshipField, Array, KeyValue, Date, JSON from flask_presst.nesting import NestedProxy from flask_presst.parsing import PresstArgument import six LINK_HEADER_FORMAT_STR = '<{0}?page={1}&per_page={2}>; rel="{3}"' class PresstResourceMeta(MethodViewType): def __new__(mcs, name, bases, members): class_ = super(PresstResourceMeta, mcs).__new__(mcs, name, bases, members) if hasattr(class_, '_meta'): try: meta = dict(getattr(class_, 'Meta').__dict__) # copy mappingproxy into dict. except AttributeError: meta = {} class_.resource_name = meta.get('resource_name', class_.__name__).lower() class_.nested_types = nested_types = {} class_._id_field = meta.get('id_field', 'id') class_._required_fields = meta.get('required_fields', []) class_._fields = fields = dict() class_._read_only_fields = set(meta.get('read_only_fields', [])) class_._meta = meta for name, m in six.iteritems(members): if isinstance(m, (_RelationshipField, NestedProxy)): m.bound_resource = class_ if m.relationship_name is None: m.relationship_name = name if isinstance(m, NestedProxy): nested_types[m.relationship_name] = m elif isinstance(m, Raw): fields[m.attribute or name] = m return class_ class PresstResource(six.with_metaclass(PresstResourceMeta, Resource)): """ Resource item property fields are defined as as class attributes. e.g.: .. code-block:: python class PersonResource(PresstResource): name = fields.String() age = fields.Integer() # ... Each new subclass of :class:`PresstResource` can be configured using a class attribute, :class:`Meta`, which includes properties that are applied at creation time by the :class:`PresstResource`'s metaclass. The following attributes can be declared within :class:`Meta`: ===================== ============================================================================== Attribute name Description ===================== ============================================================================== resource_name The name of the resource used to build the resource endpoint, also used for referencing the resource using e.g. :class:`fields.ToMany`. *Default: the lower-case of the class name of the resource* id_field The default implementation of :class:`PresstResource` attempts to read the id of each resource item using this attribute or item key. The id field will never be marshalled [#f1]_. *Default: 'id'* required_fields A list of fields that must be given in `POST` requests. read_only_fields A list of fields that are returned by the resource but are ignored in `POST` and `PATCH` requests. ===================== ============================================================================== .. rubric:: Footnotes .. 
[#f1] Adventurous people can override :meth:`marshal_item` to include `id_field` instead of or in addition to ``'resource_uri'``. """ api = None resource_name = None nested_types = None _meta = None _id_field = None _fields = None _read_only_fields = None _required_fields = None def get(self, id=None, **kwargs): if id is None: item_list = self.get_item_list() return self.marshal_item_list(item_list) else: item = self.get_item_for_id(id) return self.marshal_item(item) def post(self, id=None, *args, **kwargs): if id is None: return self.marshal_item(self.create_item(self.request_parse_item())) else: return self.marshal_item(self.update_item(id, self.request_parse_item())) def patch(self, id): if id is None: abort(400, message='PATCH is not permitted on collections.') else: # TODO consider abort(400) if request.JSON is not a dictionary. changes = self.request_parse_item(limit_fields=(name for name in self._fields if name in request.json)) return self.marshal_item(self.update_item(id, changes, partial=True)) def delete(self, id, *args, **kwargs): if id is None: abort(400, message='DELETE is not permitted on collections.') else: self.delete_item(id) return None, 204 @classmethod def get_item_for_id(cls, id_): # pragma: no cover """ Must be implemented to either return the item or raise an exception such as :class:`werkzeug.exceptions.NotFound`. :param id_: id of the resource item to return """ raise NotImplementedError() @classmethod def item_get_id(cls, item): """ Returns the id attribute of a given item. Uses ``Meta.id_field``. """ return getattr(item, cls._id_field, None) or item[cls._id_field] @classmethod def get_item_list(cls): # pragma: no cover """ Must be implemented in top-level resources to return a list of items in the collection. .. note:: The term *list* here is flexible, as any type of object is valid if it can be processed by :meth:`marshal_item_list`. The default implementation supports any iterable. It is encouraged to implement some form of lazy-loading or to trim the list based on the ``request``. .. seealso:: :meth:`marshal_item_list` """ raise NotImplementedError() @classmethod def get_item_list_for_relationship(cls, relationship, parent_item): # pragma: no cover """ Return the list of items for a relationship. Must be implemented in nested resources. :param str relationship: name of the relationship in the parent level resource :param parent_item: instance of the item from the parent level resource """ raise NotImplementedError() @classmethod def create_item_relationship(cls, id_, relationship, parent_item): # pragma: no cover """ Add an item to a relationship. Must be implemented in nested resources. :param id_: the id of the item to add to the relationship :param str relationship: name of the relationship in the parent level resource :param parent_item: instance of the item from the parent level resource """ raise NotImplementedError() @classmethod def delete_item_relationship(cls, id_, relationship, parent_item): # pragma: no cover """ Delete an item from a relationship. Must be implemented in nested resources. :param id_: the id of the item to remove from the relationship :param str relationship: name of the relationship in the parent level resource :param parent_item: instance of the item from the parent level resource """ raise NotImplementedError() @classmethod def create_item(cls, dct): # pragma: no cover """ Must be implemented to create a new item in the resource collection. 
:param dict dct: parsed resource fields :return: the new item """ raise NotImplementedError() @classmethod def update_item(cls, id_, dct, partial=False): # pragma: no cover """ Must be implemented to update an item in the resource collection. :param id_: id of the item to update :param dict dct: dictionary of changes :param bool partial: whether this is a `PATCH` change """ raise NotImplementedError() @classmethod def delete_item(cls, id_): # pragma: no cover """ Must be implemented to delete an item from the resource collection. :param id_: id of the item to delete """ raise NotImplementedError() @classmethod def item_get_resource_uri(cls, item): """Returns the `resource_uri` of an item. .. seealso:: :meth:`item_get_id()` """ if cls.api is None: raise RuntimeError("{} has not been registered as an API endpoint.".format(cls.__name__)) return cls.api.url_for(cls, id=cls.item_get_id(item)) @classmethod def marshal_item(cls, item): """ Marshals the item using the resource fields and returns a JSON-compatible dictionary. """ marshaled = {'resource_uri': cls.item_get_resource_uri(item)} marshaled.update(marshal(item, cls._fields)) return marshaled @classmethod def marshal_item_list(cls, items): """ Marshals a list of items from the resource. .. seealso:: :meth:`marshal_item` """ return list(cls.marshal_item(item) for item in items) def request_parse_item(self, limit_fields=None): """ Helper method to parse an item from the request. :param limit_fields: optional list of field names to parse; if not set, all fields will be parsed. """ parser = reqparse.RequestParser(argument_class=PresstArgument) for name in limit_fields or self._fields: # FIXME handle this in PresstArgument. if name not in self._read_only_fields: required = name in self._required_fields parser.add_argument(name, type=self._fields[name], required=required, ignore=not required) return parser.parse_args() class ModelResourceMeta(PresstResourceMeta): def __new__(mcs, name, bases, members): class_ = super(ModelResourceMeta, mcs).__new__(mcs, name, bases, members) meta = class_._meta if meta: class_._model = model = meta.get('model', None) if not model: return class_ mapper = class_mapper(model) # TODO support multiple primary keys with child resources. assert len(mapper.primary_key) == 1 class_._id_field = meta.get('id_field', mapper.primary_key[0].name) if 'id_field' in meta: class_._model_id_column = getattr(model, meta['id_field']) else: class_._model_id_column = mapper.primary_key[0] class_._field_types = field_types = {} class_.resource_name = meta.get('resource_name', model.__tablename__).lower() fields, required_fields = class_._fields, class_._required_fields include_fields = meta.get('include_fields', None) exclude_fields = meta.get('exclude_fields', None) for name, column in six.iteritems(dict(mapper.columns)): if (include_fields and name in include_fields) or \ (exclude_fields and name not in exclude_fields) or \ not (include_fields or exclude_fields): field_class = None if meta.get('exclude_polymorphic', False) and column.table != mapper.tables[-1]: continue if column.primary_key or column.foreign_keys: continue if isinstance(column.type, postgres.ARRAY): field_type = list elif isinstance(column.type, postgres.HSTORE): field_type = dict field_class = KeyValue elif hasattr(postgres, 'JSON') and isinstance(column.type, postgres.JSON): field_type = lambda data: data field_class = JSON else: field_type = column.type.python_type field_types[name] = field_type # Add to list of fields. 
if not name in fields: if not field_class: field_class = class_._get_field_from_python_type(field_type) # TODO implement support for ColumnDefault # fields[name] = field_class(default=column.default) fields[name] = field_class() if not (column.nullable or column.default): required_fields.append(name) return class_ class ModelResource(six.with_metaclass(ModelResourceMeta, PresstResource)): """ :class:`ModelResource` inherits all of the :class:`Meta` options of :class:`PresstResource`, however with slightly different behavior and including some additions. ===================== ============================================================================== Attribute name Description ===================== ============================================================================== model The :class:`sqlalchemy.ext.declarative.declarative_base` model this resource maps to. Tested only with `Flask-SQLAlchemy` models. resource_name Now defaults to the lower-case of the model class name. id_field Now defaults to the name of the primary key of `model`. include_fields A list of fields that should be imported from the `model`. By default, all columns other than foreign key and primary key columns are imported. :func:`sqlalchemy.orm.relationship` model attributes and hybrid properties cannot be defined in this way and have to be specified explicitly as resource class attributes. exclude_fields A list of fields that should not be imported from the `model`. exclude_polymorphic Whether to exclude fields that are inherited from the parent model of a polymorphic model. *Defaults to False* required_fields Fields that are automatically imported from the model are automatically required if their columns are not `nullable` and do not have a `default`. ===================== ============================================================================== This resource class processes all of the signals in :mod:`flask.ext.presst.signals`. """ _model = None _model_id_column = None _field_types = None @staticmethod def _get_field_from_python_type(python_type): return { str: String, six.text_type: String, int: Integer, bool: Boolean, list: Array, dict: KeyValue, datetime.date: Date, datetime.datetime: DateTime }[python_type] @classmethod def _get_session(cls): return get_state(current_app).db.session @classmethod def get_model(cls): return cls._model @classmethod def _process_filter_signal(cls, query, **kwargs): if request.method in ('HEAD', 'GET'): signal = on_filter_read elif request.method in ('POST', 'PATCH'): signal = on_filter_update elif request.method in ('DELETE',): signal = on_filter_delete else: return query for _, response in signal.send(cls, **kwargs): if callable(response): query = response(query) return query @classmethod def get_item_list(cls): """ Pagination is only supported for resources accessed through :class:`Relationship` if the relationship to the parent is `lazy='dynamic'`. 
""" query = cls._model.query if isinstance(query, list): abort(500, message='Nesting not supported for this resource.') return cls._process_filter_signal(query) @classmethod def get_item_list_for_relationship(cls, relationship, parent_item): query = getattr(parent_item, relationship) if isinstance(query, list): abort(500, message='Nesting not supported for this resource.') return cls._process_filter_signal(query) @classmethod def create_item_relationship(cls, id_, relationship, parent_item): item = cls.get_item_for_id(id_) before_create_relationship.send(cls, parent_item=parent_item, relationship=relationship, item=item) session = cls._get_session() try: getattr(parent_item, relationship).append(item) session.commit() except: session.rollback() raise after_create_relationship.send(cls, parent_item=parent_item, relationship=relationship, item=item) return item @classmethod def delete_item_relationship(cls, id_, relationship, parent_item): item = cls.get_item_for_id(id_) before_delete_relationship.send(cls, parent_item=parent_item, relationship=relationship, item=item) session = cls._get_session() try: getattr(parent_item, relationship).remove(item) session.commit() except: session.rollback() raise after_delete_relationship.send(cls, parent_item=parent_item, relationship=relationship, item=item) @classmethod def get_item_for_id(cls, id_): try: # SQLAlchemy's .get() does not work well with .filter() return cls.get_item_list().filter(cls._model_id_column == id_).one() except NoResultFound: abort(404) @classmethod def create_item(cls, dct): # noinspection PyCallingNonCallable item = cls._model() for key, value in six.iteritems(dct): setattr(item, key, value) before_create_item.send(cls, item=item) session = cls._get_session() try: session.add(item) session.commit() except: session.rollback() raise after_create_item.send(cls, item=item) return item @classmethod def update_item(cls, id_, dct, partial=False): item = cls.get_item_for_id(id_) session = cls._get_session() try: before_update_item.send(cls, item=item, changes=dct, partial=partial) for key, value in six.iteritems(dct): setattr(item, key, value) session.commit() except: session.rollback() raise after_update_item.send(cls, item=item) return item @classmethod def delete_item(cls, id_): item = cls.get_item_for_id(id_) before_delete_item.send(cls, item=item) session = cls._get_session() session.delete(item) session.commit() after_delete_item.send(cls, item=item) _pagination_parser = reqparse.RequestParser() _pagination_parser.add_argument('per_page', location='args', type=int, default=20) # 20 _pagination_parser.add_argument('page', location='args', type=int, default=1) @classmethod def marshal_item_list(cls, item_list, paginate=True): """ Like :meth:`PrestoResource.marshal_item_list()` except that :attr:`object_list` can be a :class:`Pagination` object, in which case a paginated result will be returned. 
""" if isinstance(item_list, BaseQuery): if paginate: args = cls._pagination_parser.parse_args() item_list = item_list.paginate(page=args.page, per_page=args.per_page) else: item_list = item_list.all() if isinstance(item_list, Pagination): links = [(request.path, item_list.page, item_list.per_page, 'self')] if item_list.has_prev: links.append((request.path, 1, item_list.per_page, 'first')) links.append((request.path, item_list.page - 1, item_list.per_page, 'prev')) if item_list.has_next: links.append((request.path, item_list.pages, item_list.per_page, 'last')) links.append((request.path, item_list.page + 1, item_list.per_page, 'next')) headers = {'Link': ','.join((LINK_HEADER_FORMAT_STR.format(*link) for link in links))} return super(ModelResource, cls).marshal_item_list(item_list.items), 200, headers # fallback: return super(ModelResource, cls).marshal_item_list(item_list) class PolymorphicModelResource(ModelResource): """ :class:`PolymorphicModelResource` is identical to :class:`ModelResource`, except that when it marshals an item that has a different class than the ``model`` attribute defined in :class:`Meta`, it marshals the contents of that model separately from the inherited resource and adds it to the marshalled dictionary as a property with the name of the inherited resource. e.g. .. code-block:: javascript { "resource_uri": "/polymorphic_resource/1", // polymorphic_resource properties "base_resource": { "resource_uri": "/base_resource/1", // base_resource properties } } :class:`PolymorphicModelResource` is designed to be used with SQLAlchemy models that make use of `SQLAlchemy's polymorphic inheritance <http://docs.sqlalchemy.org/en/latest/orm/inheritance.html>`_. """ @classmethod def marshal_item(cls, item): resource = cls.api.get_resource_for_model(item.__class__) marshaled = super(PolymorphicModelResource, cls).marshal_item(item) if resource and resource != cls: marshaled[resource.resource_name.replace('/', '__')] = resource.marshal_item(item) # fallback: return marshaled
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from ryu.base import app_manager from ryu.controller import ofp_event from ryu.controller.handler import CONFIG_DISPATCHER, \ MAIN_DISPATCHER, DEAD_DISPATCHER from ryu.controller.handler import set_ev_cls from ryu.ofproto import ofproto_v1_3 from ryu.lib.packet import packet from ryu.lib.packet import ethernet, ipv4, icmp, arp from ryu.controller import dpset from ryu.lib.packet.lldp import LLDP_MAC_NEAREST_BRIDGE # from ryu.lib.packet.ether_types import ETH_TYPE_LLDP import array from ryu.lib import hub from operator import attrgetter import json import shutil import os import subprocess import time import networkx as nx from ryu.topology import event, switches from ryu.topology.api import get_switch, get_link # output ovs switch hostname and DPID pairs OFP_SWITCHES_LIST = \ './network-data/ofp_switches_list.db' OFP_SWITCHES_LIST_PREVIOUS = \ './network-data/ofp_switches_list_prev.db' OFP_SWITCHES_LIST_SCRIPT = \ './scripts/remote_ovs_operation/get_switch_ofpbr_datapath_id.sh' OFP_SWITCHES_FLOW_STATS = \ './network-data/ofp_switches_{0}_flow_stats.db' OFP_SWITCHES_FLOW_STATS_PREVIOUS = \ './network-data/ofp_switches_{0}_flow_stats_prev.db' OFP_SWITCHES_PORT_STATS = \ './network-data/ofp_switches_{0}_port_stats.db' OFP_SWITCHES_PORT_STATS_PREVIOUS = \ './network-data/ofp_switches_{0}_port_stats_prev.db' OFP_SINGLE_SHOREST_PATH = './network-data/ofp_single_shortest_path.db' OFP_ALL_PAIRS_SHOREST_PATH = './network-data/ofp_all_pairs_shortest_path.db' OFP_ALL_PATHS_SHOREST_PATH = './network-data/ofp_all_paths_shortest_path.db' OFP_MAC_TO_PORT = './network-data/ofp_mac_to_port.db' class SimpleSwitch13(app_manager.RyuApp): OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] _CONTEXTS = { 'dpset': dpset.DPSet, } def __init__(self, *args, **kwargs): super(SimpleSwitch13, self).__init__(*args, **kwargs) self.mac_to_port = {} self.dpset = kwargs['dpset'] self.datapaths = {} # create thread for traffic monitoring self.monitor_thread = hub.spawn(self._monitor) self.hostname_list = {} self.net = nx.DiGraph() self.nodes = {} self.links = {} self.no_of_nodes = 0 self.no_of_links = 0 self.topology_data_app = self # Given DPID, output hostname in string def _hostname_Check(self, datapath): # Given decimal datapath ID, return hostname with open(OFP_SWITCHES_LIST_PREVIOUS, 'r') as iff: for line in iff: hostname, dpid = line.split() self.hostname_list[int(dpid, 16)] = hostname # print self.hostname_list # NEED add some datapath check later if datapath not in self.hostname_list.keys(): return datapath else: return self.hostname_list[datapath] ################################################################### # ofp_event.EventOFPSwitchFeatures #################################################################### @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) def switch_features_handler(self, ev): self._update_switch_dpid_list() self.logger.info("Switch Feature reply") msg = ev.msg datapath = 
ev.msg.datapath ofproto = datapath.ofproto parser = datapath.ofproto_parser self.logger.info( " datapath in decimal %s,in hex %s", datapath.id, hex(int(datapath.id))) self.logger.info(' OFPSwitchFeatures received: ' 'datapath_id=0x%016x n_buffers=%d ' 'n_tables=%d auxiliary_id=%d ' 'capabilities=0x%08x', msg.datapath_id, msg.n_buffers, msg.n_tables, msg.auxiliary_id, msg.capabilities) # install table-miss flow entry # # We specify NO BUFFER to max_len of the output action due to # OVS bug. At this moment, if we specify a lesser number, e.g., # 128, OVS will send Packet-In with invalid buffer_id and # truncated packet data. In that case, we cannot output packets # correctly. The bug has been fixed in OVS v2.1.0. match = parser.OFPMatch() actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)] self.add_flow(datapath, 0, match, actions) ################################################################### # update switch dpid every 10s #################################################################### def _update_switch_dpid_list(self): # update and write to ./network-data/ofp_switches_list.db # it will be called when switch in and out subprocess.call([OFP_SWITCHES_LIST_SCRIPT]) shutil.copyfile(OFP_SWITCHES_LIST, OFP_SWITCHES_LIST_PREVIOUS) def _udpate_switch_port_stats(self): # write to ./network-data/ofp-switch-port-stats.db pass ################################################################### # add flow #################################################################### def add_flow(self, datapath, priority, match, actions, buffer_id=None): ofproto = datapath.ofproto parser = datapath.ofproto_parser inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)] if buffer_id: mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id, priority=priority, match=match, instructions=inst) else: mod = parser.OFPFlowMod(datapath=datapath, priority=priority, match=match, instructions=inst) datapath.send_msg(mod) ################################################################### # EventOFPPacketIn handler #################################################################### @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) def _packet_in_handler(self, ev): # If you hit this you might want to increase # the "miss_send_length" of your switch if ev.msg.msg_len < ev.msg.total_len: self.logger.debug("packet truncated: only %s of %s bytes", ev.msg.msg_len, ev.msg.total_len) msg = ev.msg datapath = msg.datapath ofproto = datapath.ofproto parser = datapath.ofproto_parser in_port = msg.match['in_port'] pkt = packet.Packet(msg.data) eth = pkt.get_protocols(ethernet.ethernet)[0] pkt_arp = pkt.get_protocol(arp.arp) dst = eth.dst src = eth.src # eth_proto = eth.protocol_name # do not forward LLCP packet in message if dst == LLDP_MAC_NEAREST_BRIDGE: return if pkt_arp: # flood all the ARP packages and save all the requests in self.arp_request and self.arp_reply print "ARP: %s" % pkt_arp.opcode self.logger.info("packet in %s %s %s %s", datapath.id, src, dst, in_port) dpid = hex(datapath.id) self.mac_to_port.setdefault(dpid, {}) # learn a mac address to avoid FLOOD next time. 
self.mac_to_port[dpid][src] = in_port if dst in self.mac_to_port[dpid]: out_port = self.mac_to_port[dpid][dst] else: out_port = ofproto.OFPP_FLOOD self.logger.info("packet in dpid=%s src_mac=%s dst_mac=%s \ in_port=%s, out_port=%s", dpid, src, dst, in_port, out_port) actions = [parser.OFPActionOutput(out_port)] # install a flow to avoid packet_in next time if out_port != ofproto.OFPP_FLOOD: match = parser.OFPMatch(in_port=in_port, eth_dst=dst) # verify if we have a valid buffer_id, if yes avoid to send both # flow_mod & packet_out if msg.buffer_id != ofproto.OFP_NO_BUFFER: self.add_flow(datapath, 1, match, actions, msg.buffer_id) return else: self.add_flow(datapath, 1, match, actions) data = None if msg.buffer_id == ofproto.OFP_NO_BUFFER: data = msg.data out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port, actions=actions, data=data) datapath.send_msg(out) # print "mac_to_port:", self.mac_to_port ################################################################### # output shortest path for all pairs for all switches (nodes) in every 10s #################################################################### def _single_shortest_path(self): # print "Printing shortest Path..." # print nx.shortest_path(self.net) with open(OFP_SINGLE_SHOREST_PATH, 'w') as outp: for src in self.net.nodes(): for dst in self.net.nodes(): if src != dst: shortest_path = nx.shortest_path(self.net, src, dst) outp.write("%s -> %s %s" % (self._hostname_Check(src), self._hostname_Check(dst), [self._hostname_Check(i) for i in shortest_path])) outp.write("\n") # print self._hostname_Check(src), " -> ",\ # self._hostname_Check(dst), " ",\ # [self._hostname_Check(i) for i in shortest_path] def _all_paris_shortest_path(self): # print one shortest path for all node pairs # print nx.shortest_path(self.net) with open(OFP_ALL_PAIRS_SHOREST_PATH, 'w') as outp: shortest_path = nx.all_pairs_dijkstra_path(self.net) for src in shortest_path.keys(): for dst in shortest_path[src]: outp.write("%s -> %s %s\n" % (self._hostname_Check(src), self._hostname_Check(dst), [self._hostname_Check(i) for i in shortest_path[src][dst]])) # print self._hostname_Check(src), " -> ", self._hostname_Check(dst),\ # " ", [self._hostname_Check(i) # for i in shortest_path[src][dst]] def _all_paths_shortest_path(self): # print all the shortest paths for each node pair with open(OFP_ALL_PATHS_SHOREST_PATH, 'w') as outp: for src in self.net.nodes(): for dst in self.net.nodes(): if src != dst: shortest_path = nx.all_shortest_paths(self.net, src, dst) for each_path_list in shortest_path: outp.write("%s -> %s %s" % (self._hostname_Check(src), self._hostname_Check(dst), [self._hostname_Check(i) for i in each_path_list])) outp.write("\n") ################################################################### # write mac_to_port in every 10s #################################################################### def _mac_to_port(self): with open(OFP_MAC_TO_PORT, 'w') as outp: for dpid in self.mac_to_port.keys(): for src in self.mac_to_port[dpid]: outp.write("dpid=%s src_mac=%s out_port=%s\n" % (self._hostname_Check(dpid), src, self.mac_to_port[dpid][src])) ################################################################### # Refresh Network nodes and links every 10s #################################################################### @set_ev_cls(event.EventSwitchEnter) def get_topology_data(self, ev): switch_list = get_switch(self.topology_data_app, None) switches = [switch.dp.id for switch in switch_list] self.net.add_nodes_from(switches) 
links_list = get_link(self.topology_data_app, None) # print links_list links = [(link.src.dpid, link.dst.dpid, {'port': link.src.port_no}) for link in links_list] # print links self.net.add_edges_from(links) links = [(link.dst.dpid, link.src.dpid, {'port': link.dst.port_no}) for link in links_list] # print links self.net.add_edges_from(links) # print "**********List of links", # print self.net.nodes(), '-', self.net.edges() # print [(hex(i), hex(j)) for i, j in self.net.edges()] #################################################################### # Traffc monitor section #################################################################### @set_ev_cls(ofp_event.EventOFPStateChange, [MAIN_DISPATCHER, DEAD_DISPATCHER]) def _state_change_handler(self, ev): datapath = ev.datapath if ev.state == MAIN_DISPATCHER: if datapath.id not in self.datapaths: self.logger.info('register datapath: %016x', datapath.id) self.datapaths[datapath.id] = datapath elif ev.state == DEAD_DISPATCHER: if datapath.id in self.datapaths: self.logger.info('unregister datapath: %016x', datapath.id) del self.datapaths[datapath.id] def _monitor(self): # wait fof around 10s until all the swtiches connected to controller hub.sleep(10) while True: for dp in self.datapaths.values(): self._request_stats(dp) self._update_switch_dpid_list() self._single_shortest_path() self._all_paris_shortest_path() self._all_paths_shortest_path() self._mac_to_port() hub.sleep(10) # send flow and port stats request def _request_stats(self, datapath): self.logger.debug('send stats request: %016x', datapath.id) ofproto = datapath.ofproto parser = datapath.ofproto_parser # flow stats request C->S req = parser.OFPFlowStatsRequest(datapath) datapath.send_msg(req) # port status request C->S req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY) datapath.send_msg(req) @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER) def _flow_stats_reply_handler(self, ev): body = ev.msg.body # print "flow body:", body[1] switch_name = self._hostname_Check(ev.msg.datapath.id) with open(OFP_SWITCHES_FLOW_STATS.format(switch_name), 'w') as iff: self.logger.debug("\n> Flow Stats:") self.logger.debug('datapath ' 'hostname ' 'in-port duration_sec duration_nsec ' ' eth-dst out-port packets bytes') iff.write('datapath ' 'hostname ' 'in-port duration_sec duration_nsec ' ' eth-dst out-port packets bytes\n') self.logger.debug('---------------- ' '---------------- ' '-------- ---------------- -------------- ' '---------------- -------- -------- --------') iff.write('---------------- ' '---------------- ' '-------- ---------------- -------------- ' '---------------- -------- -------- --------\n') for stat in sorted([flow for flow in body if flow.priority == 1], key=lambda flow: (flow.match['in_port'], flow.match['eth_dst'])): iff.write('%16d %16s %8x %16d %16d %17s %8x %8d %8d' % (ev.msg.datapath.id, self._hostname_Check(ev.msg.datapath.id), stat.match['in_port'], stat.duration_sec, stat.duration_nsec, stat.match['eth_dst'], stat.instructions[0].actions[0].port, stat.packet_count, stat.byte_count)) iff.write("\n") self.logger.debug('%16d %16s %8x %16d %16d %17s %8x %8d %8d', ev.msg.datapath.id, self._hostname_Check(ev.msg.datapath.id), stat.match['in_port'], stat.duration_sec, stat.duration_nsec, stat.match['eth_dst'], stat.instructions[0].actions[0].port, stat.packet_count, stat.byte_count) @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER) def _port_stats_reply_handler(self, ev): body = ev.msg.body self.get_topology_data(ev) # print "port body:", 
body[1] switch_name = self._hostname_Check(ev.msg.datapath.id) with open(OFP_SWITCHES_PORT_STATS.format(switch_name), 'w') as iff: self.logger.debug("\n> Port Stats:") self.logger.debug('datapath ' 'hostname ' 'port duration_sec duration_nsec ' 'rx-pkts rx-bytes rx-error ' 'tx-pkts tx-bytes tx-error') iff.write('datapath ' 'hostname ' 'port duration_sec duration_nsec ' 'rx-pkts rx-bytes rx-error ' 'tx-pkts tx-bytes tx-error\n') self.logger.debug('---------------- ' '-------------- ' '-------- ---------------- -------------- ' '-------- -------- -------- ' '-------- -------- --------') iff.write('---------------- ' '-------------- ' '-------- ------------ -------------- ' '-------- -------- -------- ' '-------- -------- --------\n') for stat in sorted(body, key=attrgetter('port_no')): self.logger.debug('%016x %8s %8x %16d %16d %8d %8d %8d %8d %8d %8d', ev.msg.datapath.id, self._hostname_Check(ev.msg.datapath.id), stat.port_no, stat.duration_sec, stat.duration_nsec, stat.rx_packets, stat.rx_bytes, stat.rx_errors, stat.tx_packets, stat.tx_bytes, stat.tx_errors) iff.write('%016x %8s %8x %16d %16d %8d %8d %8d %8d %8d %8d' % (ev.msg.datapath.id, self._hostname_Check(ev.msg.datapath.id), stat.port_no, stat.duration_sec, stat.duration_nsec, stat.rx_packets, stat.rx_bytes, stat.rx_errors, stat.tx_packets, stat.tx_bytes, stat.tx_errors)) iff.write("\n") # The switch notifies controller of change of ports. @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER) def _port_status_handler(self, ev): msg = ev.msg reason = msg.reason port_no = msg.desc.port_no ofproto = msg.datapath.ofproto if reason == ofproto.OFPPR_ADD: self.logger.info("port added %s", port_no) elif reason == ofproto.OFPPR_DELETE: self.logger.info("port deleted %s", port_no) elif reason == ofproto.OFPPR_MODIFY: self.logger.info("port modified %s", port_no) else: self.logger.info("Illeagal port state %s %s", port_no, reason) # This will turn on Web restAPI app_manager.require_app('ryu.app.rest_topology') app_manager.require_app('ryu.app.ws_topology') app_manager.require_app('ryu.app.ofctl_rest') # app_manager.require_app('my_traffic_monitor') app_manager.require_app('ryu.app.gui_topology.gui_topology') # print "Project Path", PATH
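# --- Illustrative sketch (not part of the original app) ---
# Standalone restatement of the networkx pattern used above: each switch DPID
# becomes a node, every discovered link is added in both directions tagged with
# its outgoing port, and paths are read straight off the DiGraph.  The DPIDs and
# port numbers are made up, and the module file name in the launch note is
# hypothetical.  Dict-returning networkx 1.x APIs are assumed, as in
# _all_paris_shortest_path() above.
if __name__ == '__main__':
    demo_net = nx.DiGraph()
    demo_net.add_nodes_from([1, 2, 3])                       # fake datapath IDs
    demo_net.add_edges_from([(1, 2, {'port': 1}), (2, 1, {'port': 1}),
                             (2, 3, {'port': 2}), (3, 2, {'port': 1})])
    print(nx.shortest_path(demo_net, 1, 3))                  # -> [1, 2, 3]
    print(demo_net[1][2]['port'])                            # egress port at switch 1 toward 2

# Launching the controller app (topology discovery requires --observe-links):
#   ryu-manager --observe-links simple_switch_13_monitor.py
# The ./network-data/ and ./scripts/remote_ovs_operation/ paths referenced at
# the top of this file must exist relative to the working directory.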
#!/usr/bin/python3 ''' This module is for QIIME analysis of metagenomic data. Our input pipeline is from Illumina based data. This module will NOT attempt to install QIIME on this machine. It is complicated. Written by TicklishGiraffe ''' import tkinter, os, sys, subprocess, shutil, logging, datetime, time, glob, csv, fnmatch from tkinter import filedialog as fd from subprocess import Popen, PIPE currentmonth=datetime.datetime.now().strftime('%m') currentday=datetime.datetime.now().strftime('%d') currentyear=datetime.datetime.now().strftime('%Y') logname='metagenomics_log_started_on_'+currentmonth+'_'+currentday+'_'+currentyear logger=logging.getLogger(__name__) logger.setLevel(logging.INFO) handler=logging.FileHandler(logname) formatter=logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) #Test for qiime try: qiime=subprocess.call('qiime print_qiime_config.py', shell=True) if qiime==0: logger.info('QIIME is installed on this machine') else: logger.info('QIIME is not currently installed on this machine. Advise downloading full BIOLINUX os for ease of install') error=tkinter.Tk() errorlabel=tkinter.Label(error, text='Error has arrisen, please see log') errorlabel.pack() errorbutton=tkinter.Button(error, text='Exit', command=error.destroy) errorbutton.pack() error.mainloop() exit() except: logger.info('Cannot communicate with the system?') error2=tkinter.Tk() error2label=tkinter.Label(error2, text='Error has arrisen, please see log') error2button=tkinter.Button(error2, text='Exit', command=error2.destroy) error2label.pack() error2button.pack() error2.mainloop() exit() def metagenomics(folder, mapping, choice): logger.info('The folder is '+folder+', the mapping file is '+mapping+', and the analysis is '+choice) qiimespot=folder+'/QIIME_Analysis' os.mkdir(qiimespot) #make qiime config file since they broke it configloc=os.path.expanduser('~/') if os.path.exists(configloc+'.qiime_config'): logger.info('Fixed QIIME Config already exists') else: qiimeconfig=open(configloc+'.qiime_config','w') qiimeconfig.write('blastmat_dir\t/usr/sharencbi/data\npick_otus_reference_seqs_fp\t/usr/lib/python2.7/dist-packages/qiime_default_reference/gg_13_8_otus/rep_set/97_otus.fasta\njobs_to_start\t1\nqiime_scripts_dir\t/usr/lib/qiime/bin/\nworking_dir\t.\npynast_template_alignment_fp\t/usr/share/qiime/data/core_set_aligned.fasta.imputed\npython_exe_fp\tpython\ntemp_dir\t/tmp\nassign_taxonomy_reference_seqs_fp\t/usr/lib/python2.7/dist-packages/qiime_default_reference/gg_13_8_otus/rep_set/97_otus.fasta\nblastall_fp\tblastall\nseconds_to_sleep\t60\nassign_taxonomy_id_to_taxonomy_fp\t/usr/lib/python2.7/dist-packages/qiime_default_reference/gg_13_8_otus/taxonomy/97_otu_taxonomy.txt') qiimeconfig.close() logger.info('Created QIIME config file') #copy the FASTQs fastqcopy=[] for root, dirnames, filenames in os.walk(folder): for filename in fnmatch.filter(filenames, '*.fastq.gz'): fastqcopy.append(os.path.join(root, filename)) for fastq in fastqcopy: shutil.copy(fastq, qiimespot) logger.info(fastq+' has been copied') #unzip os.chdir(qiimespot)#CHANGES DIRERCTORY HERE try: unzip=Popen('gunzip *.gz', shell=True,stdout=PIPE, stderr=PIPE) stdout, stderr=unzip.communicate() logger.info('Unzipped all fastqs') except: logger.info('Unzipping failed, aborting') exit() #Reverse complement strand 2 readtwo=[os.path.basename(x) for x in glob.glob(qiimespot+'/?*R2_00?*')] for read in readtwo: try: comp=Popen('fastx_reverse_complement -i 
'+qiimespot+'/'+read+' -o '+qiimespot+'/'+read+'-RC.fastq', shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderr = comp.communicate()
            logger.info(read + ' has been reverse complemented')
        except:
            logger.info('Uh oh, check if the FASTX tools are installed (get biolinux...) or something went wrong, aborting')
            exit()
    # Remove old read twos
    try:
        rm2 = Popen('rm ?*R2_001.fastq', shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderr = rm2.communicate()
        logger.info('Unconverted read twos have been removed')
    except:
        logger.info('Error removing old read twos, aborting')
        exit()
    # Rename the new R2 files back to the normal format
    for f in os.listdir('.'):
        if not f.startswith('.'):
            os.rename(f, f.replace(".fastq-RC", ""))
    logger.info("Read twos have been renamed")
    # Concatenate R1 and R2
    basenamestotal = [os.path.basename(x) for x in glob.glob(qiimespot + '/?*R1_00?*')]
    basenamescat = []
    for name in basenamestotal:
        basenamescat.append(name.replace('_R1_001.fastq', ''))
    for base in basenamescat:
        X = base + '_R1_001.fastq'
        Y = base + '_R2_001.fastq'
        try:
            concat = Popen('cat ' + X + ' ' + Y + ' > ' + base + '_R1_001-combined.fastq', shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderr = concat.communicate()
            logger.info(base + ' has had read 1 and read 2 combined')
        except:
            logger.info('Concatenation issues, aborting')
            exit()
    # Final bit of renaming
    for f in os.listdir('.'):
        if not f.startswith('.'):
            os.rename(f, f.replace("-combined.fastq", ""))
    # Remove old read 1 and read 2 files
    try:
        rm22 = Popen('rm ?*R2_001.fastq', shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderr = rm22.communicate()
        if stdout:
            logger.info('Unconcatenated Read 2s have been removed')
        rm1 = Popen('rm ?*R1_001.fastq', shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderr = rm1.communicate()
        if stdout:
            logger.info('Unconcatenated Read 1s have been removed')
    except:
        logger.info('Error removing unconcatenated files, aborting')
        exit()
    for f in os.listdir('.'):
        if not f.startswith('.'):
            os.rename(f, f.replace("_L001_R1_001", ""))
    for f in os.listdir('.'):
        if not f.startswith('.'):
            os.rename(f, f.replace("_R1_001", ""))
    logger.info('All files now properly renamed for analysis...')
    # Create map files
    logger.info('Creating map files now...')
    catbasenames = [os.path.basename(x) for x in glob.glob(qiimespot + '/*')]
    # Copy and convert the mapping file
    mapspot = qiimespot + '/maps'
    os.mkdir(mapspot)
    shutil.copy(mapping, mapspot + "/map.csv")
    text_list = []
    with open(mapspot + "/map.csv", "r") as my_input_file:
        for line in my_input_file:
            line = line.split(',')
            text_list.append('\t'.join(line))
    with open(mapspot + "/map", "w") as my_output_file:
        for line in text_list:
            my_output_file.write(line)
    logger.info('Map file has been created')
    for file in catbasenames:
        # Drop the .fastq extension to get the per-sample base name
        mapname = file.replace('.fastq', '')
        with open(mapspot + '/' + mapname, 'w', newline='') as csvfile:
            mapwriter = csv.writer(csvfile, delimiter='\t', quotechar=' ', quoting=csv.QUOTE_MINIMAL)
            mapwriter.writerow(['#SampleID', 'BarcodeSequence', 'LinkerPrimerSequence'])
            mapwriter.writerow([mapname])
        logger.info('Map file written for ' + file)
    # Split libraries
    splitnames = []
    os.mkdir('./fna')
    for name in catbasenames:
        splitnames.append(name.replace('.fastq', ''))
    for fastq in splitnames:
        try:
            split = Popen('qiime split_libraries_fastq.py -i ' + fastq + ' -m maps/' + fastq + ' -o output/' + fastq + " --barcode_type 'not-barcoded' --sample_id " + fastq, shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderr = split.communicate()
            logger.info(fastq + ' has been split')
            shutil.copy('output/' + fastq + '/seqs.fna', 'fna/seqs' + fastq + '.fna')
            logger.info('FNA file for ' + fastq + ' has been moved to FNA folder')
        except:
            logger.info('Splitting for ' + fastq + ' has failed, aborting')
            exit()
    # Add QIIME labels
    try:
        labels = Popen('qiime add_qiime_labels.py -m maps/map -i fna -c InputFileName -o labelled', shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderr = labels.communicate()
        logger.info('QIIME labels added')
        logger.info(stderr)
    except:
        logger.info('QIIME labels error. This is usually a mapping error gone wild. Check map file and restart. Aborting')
        exit()
    # Where things get different for fungal or bacterial analysis.
    if choice == 'Fungal':
        logger.info('Fungal analysis in progress...')
        logger.info('Making fungal parameter file')
        fungalfasta = []
        fungaltxt = []
        # Try to find the UNITE files since they aren't built in
        for root, dirs, files in os.walk('/'):
            if 'UNITE97_08_2015.fasta' in files:
                fungalfasta.append(os.path.join(root, 'UNITE97_08_2015.fasta'))
                logger.info('UNITE fasta file has been found')
            if 'UNITE97_08_2015.txt' in files:
                fungaltxt.append(os.path.join(root, 'UNITE97_08_2015.txt'))
                logger.info('UNITE txt file has been found')
        # Only give up once the whole filesystem walk has finished.
        if not fungalfasta or not fungaltxt:
            logger.info('The UNITE files are not on this computer. Please acquire the UNITE database and then try again.')
            exit()
        fungal_parameters = open(qiimespot + '/fungalparameters', 'w')
        fungal_parameters.write('#Qiime Parameters File\n#pick_rep_set.py\nassign_taxonomy:reference_seqs_fp\t' + fungalfasta[0] + '\nassign_taxonomy:id_to_taxonomy_fp\t' + fungaltxt[0] + '\nassign_taxonomy:assignment_method\tblast')
        fungal_parameters.close()
        logger.info('Fungal parameters file made')
        logger.info('Fungal analysis in progress...')
        # Open reference OTUs
        try:
            openf = Popen('qiime pick_open_reference_otus.py -a -O 11 -i labelled/combined_seqs.fna -o open_reference -f -p ' + qiimespot + '/fungalparameters' + ' -r ' + fungalfasta[0] + ' --suppress_align_and_tree', shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderr = openf.communicate()
            logger.info('Open reference otus successful')
        except:
            logger.info('Error during open reference otus, aborting')
            exit()
        # Core diversity
        logger.info('Core diversity analysis beginning...')
        try:
            coref = Popen('qiime core_diversity_analyses.py -i open_reference/otu_table_mc2_w_tax.biom -a -O 11 -e 1000 -m maps/map -o core_diversity -p ' + qiimespot + '/fungalparameters' + ' --nonphylogenetic_diversity', shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderr = coref.communicate()
            logger.info('Core diversity analysis completed.')
        except:
            logger.info('Error during core diversity analysis, aborting')
            exit()
        # Create a usable biom file
        try:
            biomf = Popen('biom convert -i open_reference/otu_table_mc2_w_tax.biom -o open_reference/Megan_compatible_otu_table.biom --table-type="OTU table" --to-json', shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderr = biomf.communicate()
            logger.info('Biom file converted successfully')
        except:
            logger.info('Biom file error, aborting')
            exit()
        if os.path.exists(qiimespot + '/open_reference/Megan_compatible_otu_table.biom'):
            logger.info('QIIME analysis successful for Fungal Samples')
        else:
            logger.info('Error occurred somewhere')
    elif choice == 'Bacterial':
        logger.info('Bacterial analysis in progress...')
        # Open reference OTUs
        try:
            openb = Popen('qiime pick_open_reference_otus.py -a -O 11 -i labelled/combined_seqs.fna -o open_reference -f', shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderr = openb.communicate()
            logger.info('Open reference otus successful')
        except:
            logger.info('Error during open reference otus, aborting')
            exit()
        # Core diversity
        logger.info('Core diversity analysis beginning...')
        try:
            coreb = Popen('qiime core_diversity_analyses.py -i open_reference/otu_table_mc2_w_tax.biom -a -O 11 -t open_reference/rep_set.tre -e 1000 -m maps/map -o core_diversity', shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderr = coreb.communicate()
            logger.info('Core diversity analysis completed.')
        except:
            logger.info('Error during core diversity analysis, aborting')
            exit()
        # Create a usable biom file
        try:
            biomb = Popen('biom convert -i open_reference/otu_table_mc2_w_tax.biom -o open_reference/Megan_compatible_otu_table.biom --table-type="OTU table" --to-json', shell=True, stdout=PIPE, stderr=PIPE)
            stdout, stderr = biomb.communicate()
            logger.info('Biom file converted successfully')
        except:
            logger.info('Biom file error, aborting')
            exit()
        if os.path.exists(qiimespot + '/open_reference/Megan_compatible_otu_table.biom'):
            logger.info('QIIME analysis successful for Bacterial Samples')
        else:
            logger.info('Error occurred somewhere')
    else:
        logger.info('Something went wrong determining fungal or bacterial. Aborting')
        exit()


# GUI set up
meta = tkinter.Tk()
meta.config(background='goldenrod')
meta.title('QIIME Metagenomics with GUI')
startmessage = tkinter.Message(meta, width=700, background='goldenrod', font=('Helvetica', 16), text='''
Welcome to Basic QIIME Metagenomic Analysis.
This program will run the absolute baseline analysis for 16s Metagenomic Data.
Any alterations to the QIIME setup will have to be done through traditional QIIME execution or later versions of this GUI.
While searching for your data, please select the folder that contains all FASTQ files relevant to this project.
Your mapping file will be critical for this. Any errors in the mapping file will lead to analysis abort or erroneous output.
Please see the template mapping file or find an administrator if you have any questions.
Please ensure that your "InputFileName" column is labelled as "seqs'Sample Name as entered on Illumina Sample Sheet'_'sample number as entered on Illumina Sample Sheet'.fna"
For example, a sample called TG1 that is sample 4 would be "seqsTG1_S4.fna" on the sample sheet.
''')
startmessage.pack()
folderclick = False
sampleclick = False
typechosen = False
forb = tkinter.IntVar(meta)


def clickcheck():
    if folderclick and sampleclick and typechosen:
        runbutton.config(state='active')


def runfolder():
    global folder
    folder = fd.askdirectory()
    runfolderbutton.config(state='disabled')
    global folderclick
    folderclick = True
    clickcheck()
    logger.info('Runfolder is ' + folder)
    return folder


def mappingsheet():
    global maps
    maps = fd.askopenfilename()
    mappingsheetbutton.config(state='disabled')
    global sampleclick
    sampleclick = True
    clickcheck()
    logger.info('Sample sheet/Mapping file is ' + maps)
    return maps


def typechoose():
    choice = forb.get()
    fungalbutton.config(state='disabled')
    bacbutton.config(state='disabled')
    global typechosen
    typechosen = True
    clickcheck()
    global borf
    if choice == 1:
        borf = 'Fungal'
        logger.info('Fungal analysis')
        return borf
    elif choice == 2:
        borf = 'Bacterial'
        logger.info('Bacterial analysis')
        return borf


def go():
    fungalbutton.pack_forget()
    bacbutton.pack_forget()
    runfolderbutton.pack_forget()
    mappingsheetbutton.pack_forget()
    choicemessage.pack_forget()
    startmessage.pack_forget()
    exitbutton.pack_forget()
    runbutton.pack_forget()
    logger.info('Initializing...')
    runningmessage = tkinter.Message(meta, width=700, background='goldenrod', font=('Helvetica', 16), text='''
QIIME Analysis running... Please be patient while this occurs.
Files will be getting copied and moved around so do not be alarmed.
The processes involved will probably utilize most of your compute power for several hours.
Personally, I would advise doing nothing else on this machine during that time; you may slow its progress or abort it.
Patience young grasshopper...
''')
    runningmessage.pack()
    time.sleep(10)
    metagenomics(folder, maps, borf)
    if os.path.exists(folder + '/QIIME_Analysis/open_reference/Megan_compatible_otu_table.biom'):
        runningmessage.pack_forget()
        endmessage = tkinter.Message(meta, width=700, background='goldenrod', font=('Helvetica', 16), text='''
QIIME Analysis completed successfully! Enjoy your data!
''')
        endmessage.pack()
        exitbutton.pack()
    else:
        runningmessage.pack_forget()
        errorendmessage = tkinter.Message(meta, width=700, background='goldenrod', font=('Helvetica', 16), text='''
An error has occurred during the data processing.
Please see the log for more information on what went wrong and try again!
''')
        errorendmessage.pack()
        exitbutton.pack()


runfolderbutton = tkinter.Button(meta, width=50, font=('Helvetica', 14), text='Click here for Run Folder', command=runfolder)
mappingsheetbutton = tkinter.Button(meta, width=50, font=('Helvetica', 14), text='Click here for Mapping file', command=mappingsheet)
fungalbutton = tkinter.Radiobutton(meta, width=30, background='goldenrod', font=('Helvetica', 14), text='Fungal analysis', variable=forb, value=1, command=typechoose)
bacbutton = tkinter.Radiobutton(meta, width=30, background='goldenrod', font=('Helvetica', 14), text='Bacterial analysis', variable=forb, value=2, command=typechoose)
choicemessage = tkinter.Message(meta, width=500, background='goldenrod', font=('Helvetica', 14), text='Choose if this will be Fungal or Bacterial analysis')
runfolderbutton.pack()
mappingsheetbutton.pack()
choicemessage.pack()
fungalbutton.pack()
bacbutton.pack()
runbutton = tkinter.Button(meta, width=50, font=('Helvetica', 14), text='Click here once input is complete to start QIIME Analysis!', command=go)
runbutton.config(state='disabled')
runbutton.pack()
exitbutton = tkinter.Button(meta, width=50, font=('Helvetica', 14), text='Exit', command=meta.destroy)
exitbutton.pack()
meta.mainloop()
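
# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the try/except blocks
# around Popen above cannot actually detect a failed shell command, because
# with shell=True a non-zero exit status does not raise an exception, so the
# "aborting" branches only trigger on rarer errors. A minimal checked runner
# using only the standard library is sketched below; the helper name
# run_checked is hypothetical and nothing in the script calls it.
import subprocess


def run_checked(cmd, description):
    """Run a shell command, log the outcome, and exit on a non-zero status."""
    result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, universal_newlines=True)
    if result.returncode != 0:
        logger.info(description + ' failed: ' + result.stderr.strip())
        raise SystemExit(1)
    logger.info(description + ' succeeded')
    return result.stdout

# Example usage (hypothetical paths):
# run_checked('biom convert -i table.biom -o converted.biom --to-json', 'Biom conversion')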
# pylint: disable-msg=E1101,W0612 import operator import pytest from warnings import catch_warnings from numpy import nan import numpy as np import pandas as pd from pandas import Series, DataFrame, bdate_range, Panel from pandas.core.dtypes.common import ( is_bool_dtype, is_float_dtype, is_object_dtype, is_float) from pandas.core.indexes.datetimes import DatetimeIndex from pandas.tseries.offsets import BDay from pandas.util import testing as tm from pandas.compat import lrange from pandas import compat from pandas.core.sparse import frame as spf from pandas._libs.sparse import BlockIndex, IntIndex from pandas.core.sparse.api import SparseSeries, SparseDataFrame, SparseArray from pandas.tests.frame.test_api import SharedWithSparse class TestSparseDataFrame(SharedWithSparse): klass = SparseDataFrame # SharedWithSparse tests use generic, klass-agnostic assertion _assert_frame_equal = staticmethod(tm.assert_sp_frame_equal) _assert_series_equal = staticmethod(tm.assert_sp_series_equal) def setup_method(self, method): self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6], 'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6], 'C': np.arange(10, dtype=np.float64), 'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]} self.dates = bdate_range('1/1/2011', periods=10) self.orig = pd.DataFrame(self.data, index=self.dates) self.iorig = pd.DataFrame(self.data, index=self.dates) self.frame = SparseDataFrame(self.data, index=self.dates) self.iframe = SparseDataFrame(self.data, index=self.dates, default_kind='integer') self.mixed_frame = self.frame.copy(False) self.mixed_frame['foo'] = pd.SparseArray(['bar'] * len(self.dates)) values = self.frame.values.copy() values[np.isnan(values)] = 0 self.zorig = pd.DataFrame(values, columns=['A', 'B', 'C', 'D'], index=self.dates) self.zframe = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'], default_fill_value=0, index=self.dates) values = self.frame.values.copy() values[np.isnan(values)] = 2 self.fill_orig = pd.DataFrame(values, columns=['A', 'B', 'C', 'D'], index=self.dates) self.fill_frame = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'], default_fill_value=2, index=self.dates) self.empty = SparseDataFrame() def test_fill_value_when_combine_const(self): # GH12723 dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float') df = SparseDataFrame({'foo': dat}, index=range(6)) exp = df.fillna(0).add(2) res = df.add(2, fill_value=0) tm.assert_sp_frame_equal(res, exp) def test_as_matrix(self): empty = self.empty.as_matrix() assert empty.shape == (0, 0) no_cols = SparseDataFrame(index=np.arange(10)) mat = no_cols.as_matrix() assert mat.shape == (10, 0) no_index = SparseDataFrame(columns=np.arange(10)) mat = no_index.as_matrix() assert mat.shape == (0, 10) def test_copy(self): cp = self.frame.copy() assert isinstance(cp, SparseDataFrame) tm.assert_sp_frame_equal(cp, self.frame) # as of v0.15.0 # this is now identical (but not is_a ) assert cp.index.identical(self.frame.index) def test_constructor(self): for col, series in compat.iteritems(self.frame): assert isinstance(series, SparseSeries) assert isinstance(self.iframe['A'].sp_index, IntIndex) # constructed zframe from matrix above assert self.zframe['A'].fill_value == 0 tm.assert_numpy_array_equal(pd.SparseArray([1., 2., 3., 4., 5., 6.]), self.zframe['A'].values) tm.assert_numpy_array_equal(np.array([0., 0., 0., 0., 1., 2., 3., 4., 5., 6.]), self.zframe['A'].to_dense().values) # construct no data sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10)) for col, series in compat.iteritems(sdf): assert isinstance(series, 
SparseSeries) # construct from nested dict data = {} for c, s in compat.iteritems(self.frame): data[c] = s.to_dict() sdf = SparseDataFrame(data) tm.assert_sp_frame_equal(sdf, self.frame) # TODO: test data is copied from inputs # init dict with different index idx = self.frame.index[:5] cons = SparseDataFrame( self.frame, index=idx, columns=self.frame.columns, default_fill_value=self.frame.default_fill_value, default_kind=self.frame.default_kind, copy=True) reindexed = self.frame.reindex(idx) tm.assert_sp_frame_equal(cons, reindexed, exact_indices=False) # assert level parameter breaks reindex with pytest.raises(TypeError): self.frame.reindex(idx, level=0) repr(self.frame) def test_constructor_ndarray(self): # no index or columns sp = SparseDataFrame(self.frame.values) # 1d sp = SparseDataFrame(self.data['A'], index=self.dates, columns=['A']) tm.assert_sp_frame_equal(sp, self.frame.reindex(columns=['A'])) # raise on level argument pytest.raises(TypeError, self.frame.reindex, columns=['A'], level=1) # wrong length index / columns with tm.assert_raises_regex(ValueError, "^Index length"): SparseDataFrame(self.frame.values, index=self.frame.index[:-1]) with tm.assert_raises_regex(ValueError, "^Column length"): SparseDataFrame(self.frame.values, columns=self.frame.columns[:-1]) # GH 9272 def test_constructor_empty(self): sp = SparseDataFrame() assert len(sp.index) == 0 assert len(sp.columns) == 0 def test_constructor_dataframe(self): dense = self.frame.to_dense() sp = SparseDataFrame(dense) tm.assert_sp_frame_equal(sp, self.frame) def test_constructor_convert_index_once(self): arr = np.array([1.5, 2.5, 3.5]) sdf = SparseDataFrame(columns=lrange(4), index=arr) assert sdf[0].index is sdf[1].index def test_constructor_from_series(self): # GH 2873 x = Series(np.random.randn(10000), name='a') x = x.to_sparse(fill_value=0) assert isinstance(x, SparseSeries) df = SparseDataFrame(x) assert isinstance(df, SparseDataFrame) x = Series(np.random.randn(10000), name='a') y = Series(np.random.randn(10000), name='b') x2 = x.astype(float) x2.loc[:9998] = np.NaN # TODO: x_sparse is unused...fix x_sparse = x2.to_sparse(fill_value=np.NaN) # noqa # Currently fails too with weird ufunc error # df1 = SparseDataFrame([x_sparse, y]) y.loc[:9998] = 0 # TODO: y_sparse is unsused...fix y_sparse = y.to_sparse(fill_value=0) # noqa # without sparse value raises error # df2 = SparseDataFrame([x2_sparse, y]) def test_constructor_preserve_attr(self): # GH 13866 arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0) assert arr.dtype == np.int64 assert arr.fill_value == 0 df = pd.SparseDataFrame({'x': arr}) assert df['x'].dtype == np.int64 assert df['x'].fill_value == 0 s = pd.SparseSeries(arr, name='x') assert s.dtype == np.int64 assert s.fill_value == 0 df = pd.SparseDataFrame(s) assert df['x'].dtype == np.int64 assert df['x'].fill_value == 0 df = pd.SparseDataFrame({'x': s}) assert df['x'].dtype == np.int64 assert df['x'].fill_value == 0 def test_constructor_nan_dataframe(self): # GH 10079 trains = np.arange(100) tresholds = [10, 20, 30, 40, 50, 60] tuples = [(i, j) for i in trains for j in tresholds] index = pd.MultiIndex.from_tuples(tuples, names=['trains', 'tresholds']) matrix = np.empty((len(index), len(trains))) matrix.fill(np.nan) df = pd.DataFrame(matrix, index=index, columns=trains, dtype=float) result = df.to_sparse() expected = pd.SparseDataFrame(matrix, index=index, columns=trains, dtype=float) tm.assert_sp_frame_equal(result, expected) def test_type_coercion_at_construction(self): # GH 15682 result = 
pd.SparseDataFrame( {'a': [1, 0, 0], 'b': [0, 1, 0], 'c': [0, 0, 1]}, dtype='uint8', default_fill_value=0) expected = pd.SparseDataFrame( {'a': pd.SparseSeries([1, 0, 0], dtype='uint8'), 'b': pd.SparseSeries([0, 1, 0], dtype='uint8'), 'c': pd.SparseSeries([0, 0, 1], dtype='uint8')}, default_fill_value=0) tm.assert_sp_frame_equal(result, expected) def test_dtypes(self): df = DataFrame(np.random.randn(10000, 4)) df.loc[:9998] = np.nan sdf = df.to_sparse() result = sdf.get_dtype_counts() expected = Series({'float64': 4}) tm.assert_series_equal(result, expected) def test_shape(self): # see gh-10452 assert self.frame.shape == (10, 4) assert self.iframe.shape == (10, 4) assert self.zframe.shape == (10, 4) assert self.fill_frame.shape == (10, 4) def test_str(self): df = DataFrame(np.random.randn(10000, 4)) df.loc[:9998] = np.nan sdf = df.to_sparse() str(sdf) def test_array_interface(self): res = np.sqrt(self.frame) dres = np.sqrt(self.frame.to_dense()) tm.assert_frame_equal(res.to_dense(), dres) def test_pickle(self): def _test_roundtrip(frame, orig): result = tm.round_trip_pickle(frame) tm.assert_sp_frame_equal(frame, result) tm.assert_frame_equal(result.to_dense(), orig, check_dtype=False) _test_roundtrip(SparseDataFrame(), DataFrame()) self._check_all(_test_roundtrip) def test_dense_to_sparse(self): df = DataFrame({'A': [nan, nan, nan, 1, 2], 'B': [1, 2, nan, nan, nan]}) sdf = df.to_sparse() assert isinstance(sdf, SparseDataFrame) assert np.isnan(sdf.default_fill_value) assert isinstance(sdf['A'].sp_index, BlockIndex) tm.assert_frame_equal(sdf.to_dense(), df) sdf = df.to_sparse(kind='integer') assert isinstance(sdf['A'].sp_index, IntIndex) df = DataFrame({'A': [0, 0, 0, 1, 2], 'B': [1, 2, 0, 0, 0]}, dtype=float) sdf = df.to_sparse(fill_value=0) assert sdf.default_fill_value == 0 tm.assert_frame_equal(sdf.to_dense(), df) def test_density(self): df = SparseSeries([nan, nan, nan, 0, 1, 2, 3, 4, 5, 6]) assert df.density == 0.7 df = SparseDataFrame({'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6], 'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6], 'C': np.arange(10), 'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}) assert df.density == 0.75 def test_sparse_to_dense(self): pass def test_sparse_series_ops(self): self._check_frame_ops(self.frame) def test_sparse_series_ops_i(self): self._check_frame_ops(self.iframe) def test_sparse_series_ops_z(self): self._check_frame_ops(self.zframe) def test_sparse_series_ops_fill(self): self._check_frame_ops(self.fill_frame) def _check_frame_ops(self, frame): def _compare_to_dense(a, b, da, db, op): sparse_result = op(a, b) dense_result = op(da, db) fill = sparse_result.default_fill_value dense_result = dense_result.to_sparse(fill_value=fill) tm.assert_sp_frame_equal(sparse_result, dense_result, exact_indices=False) if isinstance(a, DataFrame) and isinstance(db, DataFrame): mixed_result = op(a, db) assert isinstance(mixed_result, SparseDataFrame) tm.assert_sp_frame_equal(mixed_result, sparse_result, exact_indices=False) opnames = ['add', 'sub', 'mul', 'truediv', 'floordiv'] ops = [getattr(operator, name) for name in opnames] fidx = frame.index # time series operations series = [frame['A'], frame['B'], frame['C'], frame['D'], frame['A'].reindex(fidx[:7]), frame['A'].reindex(fidx[::2]), SparseSeries( [], index=[])] for op in opnames: _compare_to_dense(frame, frame[::2], frame.to_dense(), frame[::2].to_dense(), getattr(operator, op)) # 2304, no auto-broadcasting for i, s in enumerate(series): f = lambda a, b: getattr(a, op)(b, axis='index') _compare_to_dense(frame, s, 
frame.to_dense(), s.to_dense(), f) # rops are not implemented # _compare_to_dense(s, frame, s.to_dense(), # frame.to_dense(), f) # cross-sectional operations series = [frame.xs(fidx[0]), frame.xs(fidx[3]), frame.xs(fidx[5]), frame.xs(fidx[7]), frame.xs(fidx[5])[:2]] for op in ops: for s in series: _compare_to_dense(frame, s, frame.to_dense(), s, op) _compare_to_dense(s, frame, s, frame.to_dense(), op) # it works! result = self.frame + self.frame.loc[:, ['A', 'B']] # noqa def test_op_corners(self): empty = self.empty + self.empty assert empty.empty foo = self.frame + self.empty assert isinstance(foo.index, DatetimeIndex) tm.assert_frame_equal(foo, self.frame * np.nan) foo = self.empty + self.frame tm.assert_frame_equal(foo, self.frame * np.nan) def test_scalar_ops(self): pass def test_getitem(self): # 1585 select multiple columns sdf = SparseDataFrame(index=[0, 1, 2], columns=['a', 'b', 'c']) result = sdf[['a', 'b']] exp = sdf.reindex(columns=['a', 'b']) tm.assert_sp_frame_equal(result, exp) pytest.raises(Exception, sdf.__getitem__, ['a', 'd']) def test_iloc(self): # 2227 result = self.frame.iloc[:, 0] assert isinstance(result, SparseSeries) tm.assert_sp_series_equal(result, self.frame['A']) # preserve sparse index type. #2251 data = {'A': [0, 1]} iframe = SparseDataFrame(data, default_kind='integer') tm.assert_class_equal(iframe['A'].sp_index, iframe.iloc[:, 0].sp_index) def test_set_value(self): # ok, as the index gets converted to object frame = self.frame.copy() with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): res = frame.set_value('foobar', 'B', 1.5) assert res.index.dtype == 'object' res = self.frame res.index = res.index.astype(object) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): res = self.frame.set_value('foobar', 'B', 1.5) assert res is not self.frame assert res.index[-1] == 'foobar' with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): assert res.get_value('foobar', 'B') == 1.5 with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): res2 = res.set_value('foobar', 'qux', 1.5) assert res2 is not res tm.assert_index_equal(res2.columns, pd.Index(list(self.frame.columns) + ['qux'])) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): assert res2.get_value('foobar', 'qux') == 1.5 def test_fancy_index_misc(self): # axis = 0 sliced = self.frame.iloc[-2:, :] expected = self.frame.reindex(index=self.frame.index[-2:]) tm.assert_sp_frame_equal(sliced, expected) # axis = 1 sliced = self.frame.iloc[:, -2:] expected = self.frame.reindex(columns=self.frame.columns[-2:]) tm.assert_sp_frame_equal(sliced, expected) def test_getitem_overload(self): # slicing sl = self.frame[:20] tm.assert_sp_frame_equal(sl, self.frame.reindex(self.frame.index[:20])) # boolean indexing d = self.frame.index[5] indexer = self.frame.index > d subindex = self.frame.index[indexer] subframe = self.frame[indexer] tm.assert_index_equal(subindex, subframe.index) pytest.raises(Exception, self.frame.__getitem__, indexer[:-1]) def test_setitem(self): def _check_frame(frame, orig): N = len(frame) # insert SparseSeries frame['E'] = frame['A'] assert isinstance(frame['E'], SparseSeries) tm.assert_sp_series_equal(frame['E'], frame['A'], check_names=False) # insert SparseSeries differently-indexed to_insert = frame['A'][::2] frame['E'] = to_insert expected = to_insert.to_dense().reindex(frame.index) result = frame['E'].to_dense() tm.assert_series_equal(result, expected, check_names=False) assert result.name == 'E' # insert Series 
frame['F'] = frame['A'].to_dense() assert isinstance(frame['F'], SparseSeries) tm.assert_sp_series_equal(frame['F'], frame['A'], check_names=False) # insert Series differently-indexed to_insert = frame['A'].to_dense()[::2] frame['G'] = to_insert expected = to_insert.reindex(frame.index) expected.name = 'G' tm.assert_series_equal(frame['G'].to_dense(), expected) # insert ndarray frame['H'] = np.random.randn(N) assert isinstance(frame['H'], SparseSeries) to_sparsify = np.random.randn(N) to_sparsify[N // 2:] = frame.default_fill_value frame['I'] = to_sparsify assert len(frame['I'].sp_values) == N // 2 # insert ndarray wrong size pytest.raises(Exception, frame.__setitem__, 'foo', np.random.randn(N - 1)) # scalar value frame['J'] = 5 assert len(frame['J'].sp_values) == N assert (frame['J'].sp_values == 5).all() frame['K'] = frame.default_fill_value assert len(frame['K'].sp_values) == 0 self._check_all(_check_frame) def test_setitem_corner(self): self.frame['a'] = self.frame['B'] tm.assert_sp_series_equal(self.frame['a'], self.frame['B'], check_names=False) def test_setitem_array(self): arr = self.frame['B'] self.frame['E'] = arr tm.assert_sp_series_equal(self.frame['E'], self.frame['B'], check_names=False) self.frame['F'] = arr[:-1] index = self.frame.index[:-1] tm.assert_sp_series_equal(self.frame['E'].reindex(index), self.frame['F'].reindex(index), check_names=False) def test_delitem(self): A = self.frame['A'] C = self.frame['C'] del self.frame['B'] assert 'B' not in self.frame tm.assert_sp_series_equal(self.frame['A'], A) tm.assert_sp_series_equal(self.frame['C'], C) del self.frame['D'] assert 'D' not in self.frame del self.frame['A'] assert 'A' not in self.frame def test_set_columns(self): self.frame.columns = self.frame.columns pytest.raises(Exception, setattr, self.frame, 'columns', self.frame.columns[:-1]) def test_set_index(self): self.frame.index = self.frame.index pytest.raises(Exception, setattr, self.frame, 'index', self.frame.index[:-1]) def test_append(self): a = self.frame[:5] b = self.frame[5:] appended = a.append(b) tm.assert_sp_frame_equal(appended, self.frame, exact_indices=False) a = self.frame.iloc[:5, :3] b = self.frame.iloc[5:] appended = a.append(b) tm.assert_sp_frame_equal(appended.iloc[:, :3], self.frame.iloc[:, :3], exact_indices=False) def test_apply(self): applied = self.frame.apply(np.sqrt) assert isinstance(applied, SparseDataFrame) tm.assert_almost_equal(applied.values, np.sqrt(self.frame.values)) applied = self.fill_frame.apply(np.sqrt) assert applied['A'].fill_value == np.sqrt(2) # agg / broadcast broadcasted = self.frame.apply(np.sum, broadcast=True) assert isinstance(broadcasted, SparseDataFrame) exp = self.frame.to_dense().apply(np.sum, broadcast=True) tm.assert_frame_equal(broadcasted.to_dense(), exp) assert self.empty.apply(np.sqrt) is self.empty from pandas.core import nanops applied = self.frame.apply(np.sum) tm.assert_series_equal(applied, self.frame.to_dense().apply(nanops.nansum)) def test_apply_nonuq(self): orig = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c']) sparse = orig.to_sparse() res = sparse.apply(lambda s: s[0], axis=1) exp = orig.apply(lambda s: s[0], axis=1) # dtype must be kept assert res.dtype == np.int64 # ToDo: apply must return subclassed dtype assert isinstance(res, pd.Series) tm.assert_series_equal(res.to_dense(), exp) # df.T breaks sparse = orig.T.to_sparse() res = sparse.apply(lambda s: s[0], axis=0) # noqa exp = orig.T.apply(lambda s: s[0], axis=0) # TODO: no non-unique columns supported in sparse yet # 
tm.assert_series_equal(res.to_dense(), exp) def test_applymap(self): # just test that it works result = self.frame.applymap(lambda x: x * 2) assert isinstance(result, SparseDataFrame) def test_astype(self): sparse = pd.SparseDataFrame({'A': SparseArray([1, 2, 3, 4], dtype=np.int64), 'B': SparseArray([4, 5, 6, 7], dtype=np.int64)}) assert sparse['A'].dtype == np.int64 assert sparse['B'].dtype == np.int64 res = sparse.astype(np.float64) exp = pd.SparseDataFrame({'A': SparseArray([1., 2., 3., 4.], fill_value=0.), 'B': SparseArray([4., 5., 6., 7.], fill_value=0.)}, default_fill_value=np.nan) tm.assert_sp_frame_equal(res, exp) assert res['A'].dtype == np.float64 assert res['B'].dtype == np.float64 sparse = pd.SparseDataFrame({'A': SparseArray([0, 2, 0, 4], dtype=np.int64), 'B': SparseArray([0, 5, 0, 7], dtype=np.int64)}, default_fill_value=0) assert sparse['A'].dtype == np.int64 assert sparse['B'].dtype == np.int64 res = sparse.astype(np.float64) exp = pd.SparseDataFrame({'A': SparseArray([0., 2., 0., 4.], fill_value=0.), 'B': SparseArray([0., 5., 0., 7.], fill_value=0.)}, default_fill_value=0.) tm.assert_sp_frame_equal(res, exp) assert res['A'].dtype == np.float64 assert res['B'].dtype == np.float64 def test_astype_bool(self): sparse = pd.SparseDataFrame({'A': SparseArray([0, 2, 0, 4], fill_value=0, dtype=np.int64), 'B': SparseArray([0, 5, 0, 7], fill_value=0, dtype=np.int64)}, default_fill_value=0) assert sparse['A'].dtype == np.int64 assert sparse['B'].dtype == np.int64 res = sparse.astype(bool) exp = pd.SparseDataFrame({'A': SparseArray([False, True, False, True], dtype=np.bool, fill_value=False), 'B': SparseArray([False, True, False, True], dtype=np.bool, fill_value=False)}, default_fill_value=False) tm.assert_sp_frame_equal(res, exp) assert res['A'].dtype == np.bool assert res['B'].dtype == np.bool def test_fillna(self): df = self.zframe.reindex(lrange(5)) dense = self.zorig.reindex(lrange(5)) result = df.fillna(0) expected = dense.fillna(0) tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0), exact_indices=False) tm.assert_frame_equal(result.to_dense(), expected) result = df.copy() result.fillna(0, inplace=True) expected = dense.fillna(0) tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0), exact_indices=False) tm.assert_frame_equal(result.to_dense(), expected) result = df.copy() result = df['A'] result.fillna(0, inplace=True) expected = dense['A'].fillna(0) # this changes internal SparseArray repr # tm.assert_sp_series_equal(result, expected.to_sparse(fill_value=0)) tm.assert_series_equal(result.to_dense(), expected) def test_fillna_fill_value(self): df = pd.DataFrame({'A': [1, 0, 0], 'B': [np.nan, np.nan, 4]}) sparse = pd.SparseDataFrame(df) tm.assert_frame_equal(sparse.fillna(-1).to_dense(), df.fillna(-1), check_dtype=False) sparse = pd.SparseDataFrame(df, default_fill_value=0) tm.assert_frame_equal(sparse.fillna(-1).to_dense(), df.fillna(-1), check_dtype=False) def test_sparse_frame_pad_backfill_limit(self): index = np.arange(10) df = DataFrame(np.random.randn(10, 4), index=index) sdf = df.to_sparse() result = sdf[:2].reindex(index, method='pad', limit=5) expected = sdf[:2].reindex(index).fillna(method='pad') expected = expected.to_dense() expected.values[-3:] = np.nan expected = expected.to_sparse() tm.assert_frame_equal(result, expected) result = sdf[-2:].reindex(index, method='backfill', limit=5) expected = sdf[-2:].reindex(index).fillna(method='backfill') expected = expected.to_dense() expected.values[:3] = np.nan expected = expected.to_sparse() 
tm.assert_frame_equal(result, expected) def test_sparse_frame_fillna_limit(self): index = np.arange(10) df = DataFrame(np.random.randn(10, 4), index=index) sdf = df.to_sparse() result = sdf[:2].reindex(index) result = result.fillna(method='pad', limit=5) expected = sdf[:2].reindex(index).fillna(method='pad') expected = expected.to_dense() expected.values[-3:] = np.nan expected = expected.to_sparse() tm.assert_frame_equal(result, expected) result = sdf[-2:].reindex(index) result = result.fillna(method='backfill', limit=5) expected = sdf[-2:].reindex(index).fillna(method='backfill') expected = expected.to_dense() expected.values[:3] = np.nan expected = expected.to_sparse() tm.assert_frame_equal(result, expected) def test_rename(self): result = self.frame.rename(index=str) expected = SparseDataFrame(self.data, index=self.dates.strftime( "%Y-%m-%d %H:%M:%S")) tm.assert_sp_frame_equal(result, expected) result = self.frame.rename(columns=lambda x: '%s%d' % (x, len(x))) data = {'A1': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6], 'B1': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6], 'C1': np.arange(10, dtype=np.float64), 'D1': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]} expected = SparseDataFrame(data, index=self.dates) tm.assert_sp_frame_equal(result, expected) def test_corr(self): res = self.frame.corr() tm.assert_frame_equal(res, self.frame.to_dense().corr()) def test_describe(self): self.frame['foo'] = np.nan self.frame.get_dtype_counts() str(self.frame) desc = self.frame.describe() # noqa def test_join(self): left = self.frame.loc[:, ['A', 'B']] right = self.frame.loc[:, ['C', 'D']] joined = left.join(right) tm.assert_sp_frame_equal(joined, self.frame, exact_indices=False) right = self.frame.loc[:, ['B', 'D']] pytest.raises(Exception, left.join, right) with tm.assert_raises_regex(ValueError, 'Other Series must have a name'): self.frame.join(Series( np.random.randn(len(self.frame)), index=self.frame.index)) def test_reindex(self): def _check_frame(frame): index = frame.index sidx = index[::2] sidx2 = index[:5] # noqa sparse_result = frame.reindex(sidx) dense_result = frame.to_dense().reindex(sidx) tm.assert_frame_equal(sparse_result.to_dense(), dense_result) tm.assert_frame_equal(frame.reindex(list(sidx)).to_dense(), dense_result) sparse_result2 = sparse_result.reindex(index) dense_result2 = dense_result.reindex(index) tm.assert_frame_equal(sparse_result2.to_dense(), dense_result2) # propagate CORRECT fill value tm.assert_almost_equal(sparse_result.default_fill_value, frame.default_fill_value) tm.assert_almost_equal(sparse_result['A'].fill_value, frame['A'].fill_value) # length zero length_zero = frame.reindex([]) assert len(length_zero) == 0 assert len(length_zero.columns) == len(frame.columns) assert len(length_zero['A']) == 0 # frame being reindexed has length zero length_n = length_zero.reindex(index) assert len(length_n) == len(frame) assert len(length_n.columns) == len(frame.columns) assert len(length_n['A']) == len(frame) # reindex columns reindexed = frame.reindex(columns=['A', 'B', 'Z']) assert len(reindexed.columns) == 3 tm.assert_almost_equal(reindexed['Z'].fill_value, frame.default_fill_value) assert np.isnan(reindexed['Z'].sp_values).all() _check_frame(self.frame) _check_frame(self.iframe) _check_frame(self.zframe) _check_frame(self.fill_frame) # with copy=False reindexed = self.frame.reindex(self.frame.index, copy=False) reindexed['F'] = reindexed['A'] assert 'F' in self.frame reindexed = self.frame.reindex(self.frame.index) reindexed['G'] = reindexed['A'] assert 'G' not in self.frame def 
test_reindex_fill_value(self): rng = bdate_range('20110110', periods=20) result = self.zframe.reindex(rng, fill_value=0) exp = self.zorig.reindex(rng, fill_value=0) exp = exp.to_sparse(self.zframe.default_fill_value) tm.assert_sp_frame_equal(result, exp) def test_reindex_method(self): sparse = SparseDataFrame(data=[[11., 12., 14.], [21., 22., 24.], [41., 42., 44.]], index=[1, 2, 4], columns=[1, 2, 4], dtype=float) # Over indices # default method result = sparse.reindex(index=range(6)) expected = SparseDataFrame(data=[[nan, nan, nan], [11., 12., 14.], [21., 22., 24.], [nan, nan, nan], [41., 42., 44.], [nan, nan, nan]], index=range(6), columns=[1, 2, 4], dtype=float) tm.assert_sp_frame_equal(result, expected) # method='bfill' result = sparse.reindex(index=range(6), method='bfill') expected = SparseDataFrame(data=[[11., 12., 14.], [11., 12., 14.], [21., 22., 24.], [41., 42., 44.], [41., 42., 44.], [nan, nan, nan]], index=range(6), columns=[1, 2, 4], dtype=float) tm.assert_sp_frame_equal(result, expected) # method='ffill' result = sparse.reindex(index=range(6), method='ffill') expected = SparseDataFrame(data=[[nan, nan, nan], [11., 12., 14.], [21., 22., 24.], [21., 22., 24.], [41., 42., 44.], [41., 42., 44.]], index=range(6), columns=[1, 2, 4], dtype=float) tm.assert_sp_frame_equal(result, expected) # Over columns # default method result = sparse.reindex(columns=range(6)) expected = SparseDataFrame(data=[[nan, 11., 12., nan, 14., nan], [nan, 21., 22., nan, 24., nan], [nan, 41., 42., nan, 44., nan]], index=[1, 2, 4], columns=range(6), dtype=float) tm.assert_sp_frame_equal(result, expected) # method='bfill' with pytest.raises(NotImplementedError): sparse.reindex(columns=range(6), method='bfill') # method='ffill' with pytest.raises(NotImplementedError): sparse.reindex(columns=range(6), method='ffill') def test_take(self): result = self.frame.take([1, 0, 2], axis=1) expected = self.frame.reindex(columns=['B', 'A', 'C']) tm.assert_sp_frame_equal(result, expected) def test_to_dense(self): def _check(frame, orig): dense_dm = frame.to_dense() tm.assert_frame_equal(frame, dense_dm) tm.assert_frame_equal(dense_dm, orig, check_dtype=False) self._check_all(_check) def test_stack_sparse_frame(self): with catch_warnings(record=True): def _check(frame): dense_frame = frame.to_dense() # noqa wp = Panel.from_dict({'foo': frame}) from_dense_lp = wp.to_frame() from_sparse_lp = spf.stack_sparse_frame(frame) tm.assert_numpy_array_equal(from_dense_lp.values, from_sparse_lp.values) _check(self.frame) _check(self.iframe) # for now pytest.raises(Exception, _check, self.zframe) pytest.raises(Exception, _check, self.fill_frame) def test_transpose(self): def _check(frame, orig): transposed = frame.T untransposed = transposed.T tm.assert_sp_frame_equal(frame, untransposed) tm.assert_frame_equal(frame.T.to_dense(), orig.T) tm.assert_frame_equal(frame.T.T.to_dense(), orig.T.T) tm.assert_sp_frame_equal(frame, frame.T.T, exact_indices=False) self._check_all(_check) def test_shift(self): def _check(frame, orig): shifted = frame.shift(0) exp = orig.shift(0) tm.assert_frame_equal(shifted.to_dense(), exp) shifted = frame.shift(1) exp = orig.shift(1) tm.assert_frame_equal(shifted, exp) shifted = frame.shift(-2) exp = orig.shift(-2) tm.assert_frame_equal(shifted, exp) shifted = frame.shift(2, freq='B') exp = orig.shift(2, freq='B') exp = exp.to_sparse(frame.default_fill_value, kind=frame.default_kind) tm.assert_frame_equal(shifted, exp) shifted = frame.shift(2, freq=BDay()) exp = orig.shift(2, freq=BDay()) exp = 
exp.to_sparse(frame.default_fill_value, kind=frame.default_kind) tm.assert_frame_equal(shifted, exp) self._check_all(_check) def test_count(self): dense_result = self.frame.to_dense().count() result = self.frame.count() tm.assert_series_equal(result, dense_result) result = self.frame.count(axis=None) tm.assert_series_equal(result, dense_result) result = self.frame.count(axis=0) tm.assert_series_equal(result, dense_result) result = self.frame.count(axis=1) dense_result = self.frame.to_dense().count(axis=1) # win32 don't check dtype tm.assert_series_equal(result, dense_result, check_dtype=False) def _check_all(self, check_func): check_func(self.frame, self.orig) check_func(self.iframe, self.iorig) check_func(self.zframe, self.zorig) check_func(self.fill_frame, self.fill_orig) def test_numpy_transpose(self): sdf = SparseDataFrame([1, 2, 3], index=[1, 2, 3], columns=['a']) result = np.transpose(np.transpose(sdf)) tm.assert_sp_frame_equal(result, sdf) msg = "the 'axes' parameter is not supported" tm.assert_raises_regex(ValueError, msg, np.transpose, sdf, axes=1) def test_combine_first(self): df = self.frame result = df[::2].combine_first(df) result2 = df[::2].combine_first(df.to_dense()) expected = df[::2].to_dense().combine_first(df.to_dense()) expected = expected.to_sparse(fill_value=df.default_fill_value) tm.assert_sp_frame_equal(result, result2) tm.assert_sp_frame_equal(result, expected) def test_combine_add(self): df = self.frame.to_dense() df2 = df.copy() df2['C'][:3] = np.nan df['A'][:3] = 5.7 result = df.to_sparse().add(df2.to_sparse(), fill_value=0) expected = df.add(df2, fill_value=0).to_sparse() tm.assert_sp_frame_equal(result, expected) def test_isin(self): sparse_df = DataFrame({'flag': [1., 0., 1.]}).to_sparse(fill_value=0.) xp = sparse_df[sparse_df.flag == 1.] rs = sparse_df[sparse_df.flag.isin([1.])] tm.assert_frame_equal(xp, rs) def test_sparse_pow_issue(self): # 2220 df = SparseDataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]}) # note : no error without nan df = SparseDataFrame({'A': [nan, 0, 1]}) # note that 2 ** df works fine, also df ** 1 result = 1 ** df r1 = result.take([0], 1)['A'] r2 = result['A'] assert len(r2.sp_values) == len(r1.sp_values) def test_as_blocks(self): df = SparseDataFrame({'A': [1.1, 3.3], 'B': [nan, -3.9]}, dtype='float64') # deprecated 0.21.0 with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): df_blocks = df.blocks assert list(df_blocks.keys()) == ['float64'] tm.assert_frame_equal(df_blocks['float64'], df) @pytest.mark.xfail(reason='nan column names in _init_dict problematic ' '(GH 16894)') def test_nan_columnname(self): # GH 8822 nan_colname = DataFrame(Series(1.0, index=[0]), columns=[nan]) nan_colname_sparse = nan_colname.to_sparse() assert np.isnan(nan_colname_sparse.columns[0]) def test_isna(self): # GH 8276 df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1, 2, np.nan], 'B': [0, np.nan, np.nan, 2, np.nan]}) res = df.isna() exp = pd.SparseDataFrame({'A': [True, True, False, False, True], 'B': [False, True, True, False, True]}, default_fill_value=True) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) # if fill_value is not nan, True can be included in sp_values df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan], 'B': [0, np.nan, 0, 2, np.nan]}, default_fill_value=0.) 
res = df.isna() assert isinstance(res, pd.SparseDataFrame) exp = pd.DataFrame({'A': [False, False, False, False, True], 'B': [False, True, False, False, True]}) tm.assert_frame_equal(res.to_dense(), exp) def test_notna(self): # GH 8276 df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1, 2, np.nan], 'B': [0, np.nan, np.nan, 2, np.nan]}) res = df.notna() exp = pd.SparseDataFrame({'A': [False, False, True, True, False], 'B': [True, False, False, True, False]}, default_fill_value=False) exp._default_fill_value = np.nan tm.assert_sp_frame_equal(res, exp) # if fill_value is not nan, True can be included in sp_values df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan], 'B': [0, np.nan, 0, 2, np.nan]}, default_fill_value=0.) res = df.notna() assert isinstance(res, pd.SparseDataFrame) exp = pd.DataFrame({'A': [True, True, True, True, False], 'B': [True, False, True, True, False]}) tm.assert_frame_equal(res.to_dense(), exp) @pytest.mark.parametrize('index', [None, list('abc')]) # noqa: F811 @pytest.mark.parametrize('columns', [None, list('def')]) @pytest.mark.parametrize('fill_value', [None, 0, np.nan]) @pytest.mark.parametrize('dtype', [bool, int, float, np.uint16]) def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype): # GH 4343 tm.skip_if_no_package('scipy') # Make one ndarray and from it one sparse matrix, both to be used for # constructing frames and comparing results arr = np.eye(3, dtype=dtype) # GH 16179 arr[0, 1] = dtype(2) try: spm = spmatrix(arr) assert spm.dtype == arr.dtype except (TypeError, AssertionError): # If conversion to sparse fails for this spmatrix type and arr.dtype, # then the combination is not currently supported in NumPy, so we # can just skip testing it thoroughly return sdf = pd.SparseDataFrame(spm, index=index, columns=columns, default_fill_value=fill_value) # Expected result construction is kind of tricky for all # dtype-fill_value combinations; easiest to cast to something generic # and except later on rarr = arr.astype(object) rarr[arr == 0] = np.nan expected = pd.SparseDataFrame(rarr, index=index, columns=columns).fillna( fill_value if fill_value is not None else np.nan) # Assert frame is as expected sdf_obj = sdf.astype(object) tm.assert_sp_frame_equal(sdf_obj, expected) tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense()) # Assert spmatrices equal assert dict(sdf.to_coo().todok()) == dict(spm.todok()) # Ensure dtype is preserved if possible was_upcast = ((fill_value is None or is_float(fill_value)) and not is_object_dtype(dtype) and not is_float_dtype(dtype)) res_dtype = (bool if is_bool_dtype(dtype) else float if was_upcast else dtype) tm.assert_contains_all(sdf.dtypes, {np.dtype(res_dtype)}) assert sdf.to_coo().dtype == res_dtype # However, adding a str column results in an upcast to object sdf['strings'] = np.arange(len(sdf)).astype(str) assert sdf.to_coo().dtype == np.object_ @pytest.mark.parametrize('fill_value', [None, 0, np.nan]) # noqa: F811 def test_from_to_scipy_object(spmatrix, fill_value): # GH 4343 dtype = object columns = list('cd') index = list('ab') tm.skip_if_no_package('scipy', max_version='0.19.0') # Make one ndarray and from it one sparse matrix, both to be used for # constructing frames and comparing results arr = np.eye(2, dtype=dtype) try: spm = spmatrix(arr) assert spm.dtype == arr.dtype except (TypeError, AssertionError): # If conversion to sparse fails for this spmatrix type and arr.dtype, # then the combination is not currently supported in NumPy, so we # can just skip testing it thoroughly return sdf = 
pd.SparseDataFrame(spm, index=index, columns=columns, default_fill_value=fill_value) # Expected result construction is kind of tricky for all # dtype-fill_value combinations; easiest to cast to something generic # and except later on rarr = arr.astype(object) rarr[arr == 0] = np.nan expected = pd.SparseDataFrame(rarr, index=index, columns=columns).fillna( fill_value if fill_value is not None else np.nan) # Assert frame is as expected sdf_obj = sdf.astype(object) tm.assert_sp_frame_equal(sdf_obj, expected) tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense()) # Assert spmatrices equal assert dict(sdf.to_coo().todok()) == dict(spm.todok()) # Ensure dtype is preserved if possible res_dtype = object tm.assert_contains_all(sdf.dtypes, {np.dtype(res_dtype)}) assert sdf.to_coo().dtype == res_dtype def test_from_scipy_correct_ordering(spmatrix): # GH 16179 tm.skip_if_no_package('scipy') arr = np.arange(1, 5).reshape(2, 2) try: spm = spmatrix(arr) assert spm.dtype == arr.dtype except (TypeError, AssertionError): # If conversion to sparse fails for this spmatrix type and arr.dtype, # then the combination is not currently supported in NumPy, so we # can just skip testing it thoroughly return sdf = pd.SparseDataFrame(spm) expected = pd.SparseDataFrame(arr) tm.assert_sp_frame_equal(sdf, expected) tm.assert_frame_equal(sdf.to_dense(), expected.to_dense()) def test_from_scipy_fillna(spmatrix): # GH 16112 tm.skip_if_no_package('scipy') arr = np.eye(3) arr[1:, 0] = np.nan try: spm = spmatrix(arr) assert spm.dtype == arr.dtype except (TypeError, AssertionError): # If conversion to sparse fails for this spmatrix type and arr.dtype, # then the combination is not currently supported in NumPy, so we # can just skip testing it thoroughly return sdf = pd.SparseDataFrame(spm).fillna(-1.0) # Returning frame should fill all nan values with -1.0 expected = pd.SparseDataFrame({ 0: pd.SparseSeries([1., -1, -1]), 1: pd.SparseSeries([np.nan, 1, np.nan]), 2: pd.SparseSeries([np.nan, np.nan, 1]), }, default_fill_value=-1) # fill_value is expected to be what .fillna() above was called with # We don't use -1 as initial fill_value in expected SparseSeries # construction because this way we obtain "compressed" SparseArrays, # avoiding having to construct them ourselves for col in expected: expected[col].fill_value = -1 tm.assert_sp_frame_equal(sdf, expected) class TestSparseDataFrameArithmetic(object): def test_numeric_op_scalar(self): df = pd.DataFrame({'A': [nan, nan, 0, 1, ], 'B': [0, 1, 2, nan], 'C': [1., 2., 3., 4.], 'D': [nan, nan, nan, nan]}) sparse = df.to_sparse() tm.assert_sp_frame_equal(sparse + 1, (df + 1).to_sparse()) def test_comparison_op_scalar(self): # GH 13001 df = pd.DataFrame({'A': [nan, nan, 0, 1, ], 'B': [0, 1, 2, nan], 'C': [1., 2., 3., 4.], 'D': [nan, nan, nan, nan]}) sparse = df.to_sparse() # comparison changes internal repr, compare with dense res = sparse > 1 assert isinstance(res, pd.SparseDataFrame) tm.assert_frame_equal(res.to_dense(), df > 1) res = sparse != 0 assert isinstance(res, pd.SparseDataFrame) tm.assert_frame_equal(res.to_dense(), df != 0) class TestSparseDataFrameAnalytics(object): def setup_method(self, method): self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6], 'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6], 'C': np.arange(10, dtype=float), 'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]} self.dates = bdate_range('1/1/2011', periods=10) self.frame = SparseDataFrame(self.data, index=self.dates) def test_cumsum(self): expected = SparseDataFrame(self.frame.to_dense().cumsum()) 
result = self.frame.cumsum() tm.assert_sp_frame_equal(result, expected) result = self.frame.cumsum(axis=None) tm.assert_sp_frame_equal(result, expected) result = self.frame.cumsum(axis=0) tm.assert_sp_frame_equal(result, expected) def test_numpy_cumsum(self): result = np.cumsum(self.frame) expected = SparseDataFrame(self.frame.to_dense().cumsum()) tm.assert_sp_frame_equal(result, expected) msg = "the 'dtype' parameter is not supported" tm.assert_raises_regex(ValueError, msg, np.cumsum, self.frame, dtype=np.int64) msg = "the 'out' parameter is not supported" tm.assert_raises_regex(ValueError, msg, np.cumsum, self.frame, out=result) def test_numpy_func_call(self): # no exception should be raised even though # numpy passes in 'axis=None' or `axis=-1' funcs = ['sum', 'cumsum', 'var', 'mean', 'prod', 'cumprod', 'std', 'min', 'max'] for func in funcs: getattr(np, func)(self.frame)
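
# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): a standalone reading of the
# density fixtures used above. With the default NaN fill value only non-NaN
# entries are stored, so density is the stored fraction of all entries. This
# targets the same legacy sparse API as the tests (SparseDataFrame has since
# been removed from pandas), so treat it as a sketch, not supported usage.
def _sparse_density_demo():
    df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1.0, 2.0],
                             'B': [0.0, 1.0, 2.0, 3.0]})
    # Column A stores 2 of 4 values, column B stores all 4: 6 / 8 == 0.75.
    return df.density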
""" Sony .spi3d LUT Format Input / Output Utilities =============================================== Defines the *Sony* *.spi3d* *LUT* format related input / output utilities objects: - :func:`colour.io.read_LUT_SonySPI3D` - :func:`colour.io.write_LUT_SonySPI3D` """ from __future__ import annotations import numpy as np from colour.io.luts import LUT3D, LUTSequence from colour.io.luts.common import path_to_title from colour.hints import Boolean, Integer, List, Tuple, Union from colour.utilities import ( as_float_array, as_int_array, as_int_scalar, attest, usage_warning, ) __author__ = "Colour Developers" __copyright__ = "Copyright 2013 Colour Developers" __license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause" __maintainer__ = "Colour Developers" __email__ = "colour-developers@colour-science.org" __status__ = "Production" __all__ = [ "read_LUT_SonySPI3D", "write_LUT_SonySPI3D", ] def read_LUT_SonySPI3D(path: str) -> LUT3D: """ Read given *Sony* *.spi3d* *LUT* file. Parameters ---------- path *LUT* path. Returns ------- :class:`colour.LUT3D` :class:`LUT3D` class instance. Examples -------- Reading an ordered and an unordered 3D *Sony* *.spi3d* *LUT*: >>> import os >>> path = os.path.join( ... os.path.dirname(__file__), 'tests', 'resources', 'sony_spi3d', ... 'Colour_Correct.spi3d') >>> print(read_LUT_SonySPI3D(path)) LUT3D - Colour Correct ---------------------- <BLANKLINE> Dimensions : 3 Domain : [[ 0. 0. 0.] [ 1. 1. 1.]] Size : (4, 4, 4, 3) Comment 01 : Adapted from a LUT generated by Foundry::LUT. >>> path = os.path.join( ... os.path.dirname(__file__), 'tests', 'resources', 'sony_spi3d', ... 'Colour_Correct_Unordered.spi3d') >>> print(read_LUT_SonySPI3D(path)) LUT3D - Colour Correct Unordered -------------------------------- <BLANKLINE> Dimensions : 3 Domain : [[ 0. 0. 0.] [ 1. 1. 1.]] Size : (4, 4, 4, 3) Comment 01 : Adapted from a LUT generated by Foundry::LUT. """ title = path_to_title(path) domain_min, domain_max = np.array([0, 0, 0]), np.array([1, 1, 1]) size: Integer = 2 data_table = [] data_indexes = [] comments = [] with open(path) as spi3d_file: lines = filter(None, (line.strip() for line in spi3d_file.readlines())) for line in lines: if line.startswith("#"): comments.append(line[1:].strip()) continue tokens = line.split() if len(tokens) == 3: attest( len(set(tokens)) == 1, 'Non-uniform "LUT" shape is unsupported!', ) size = as_int_scalar(tokens[0]) if len(tokens) == 6: data_table.append(as_float_array(tokens[3:])) data_indexes.append(as_int_array(tokens[:3])) indexes = as_int_array(data_indexes) sorting_indexes = np.lexsort((indexes[:, 2], indexes[:, 1], indexes[:, 0])) attest( np.array_equal( indexes[sorting_indexes], as_int_array( np.around(LUT3D.linear_table(size) * (size - 1)) ).reshape((-1, 3)), ), 'Indexes do not match expected "LUT3D" indexes!', ) table = as_float_array(data_table)[sorting_indexes].reshape( [size, size, size, 3] ) return LUT3D( table, title, np.vstack([domain_min, domain_max]), comments=comments ) def write_LUT_SonySPI3D( LUT: Union[LUT3D, LUTSequence], path: str, decimals: Integer = 7 ) -> Boolean: """ Write given *LUT* to given *Sony* *.spi3d* *LUT* file. Parameters ---------- LUT :class:`LUT3D` or :class:`LUTSequence` class instance to write at given path. path *LUT* path. decimals Formatting decimals. Returns ------- :class:`bool` Definition success. Warnings -------- - If a :class:`LUTSequence` class instance is passed as ``LUT``, the first *LUT* in the *LUT* sequence will be used. 
Examples -------- Writing a 3D *Sony* *.spi3d* *LUT*: >>> LUT = LUT3D( ... LUT3D.linear_table(16) ** (1 / 2.2), ... 'My LUT', ... np.array([[0, 0, 0], [1, 1, 1]]), ... comments=['A first comment.', 'A second comment.']) >>> write_LUT_SonySPI3D(LUT, 'My_LUT.cube') # doctest: +SKIP """ if isinstance(LUT, LUTSequence): usage_warning( f'"LUT" is a "LUTSequence" instance was passed, using first ' f'sequence "LUT":\n{LUT}' ) LUTxD = LUT[0] else: LUTxD = LUT attest(not LUTxD.is_domain_explicit(), '"LUT" domain must be implicit!') attest(isinstance(LUTxD, LUT3D), '"LUT" must be either a 3D "LUT"!') attest( np.array_equal( LUTxD.domain, np.array( [ [0, 0, 0], [1, 1, 1], ] ), ), '"LUT" domain must be [[0, 0, 0], [1, 1, 1]]!', ) def _format_array(array: Union[List, Tuple]) -> str: """Format given array as a *Sony* *.spi3d* data row.""" return "{1:d} {2:d} {3:d} {4:0.{0}f} {5:0.{0}f} {6:0.{0}f}".format( decimals, *array ) with open(path, "w") as spi3d_file: spi3d_file.write("SPILUT 1.0\n") spi3d_file.write("3 3\n") spi3d_file.write("{0} {0} {0}\n".format(LUTxD.size)) indexes = as_int_array( np.around(LUTxD.linear_table(LUTxD.size) * (LUTxD.size - 1)) ).reshape([-1, 3]) table = LUTxD.table.reshape([-1, 3]) for i, row in enumerate(indexes): spi3d_file.write(f"{_format_array(list(row) + list(table[i]))}\n") if LUTxD.comments: for comment in LUTxD.comments: spi3d_file.write(f"# {comment}\n") return True
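
# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the public module API): a small round trip
# of the two definitions above. It assumes the current working directory is
# writable; the "example.spi3d" file name is arbitrary.
def _round_trip_example() -> LUT3D:
    """
    Write a small *LUT* with :func:`write_LUT_SonySPI3D`, read it back with
    :func:`read_LUT_SonySPI3D` and return the resulting :class:`LUT3D`.
    """

    LUT = LUT3D(LUT3D.linear_table(4) ** (1 / 2.2), "Round Trip Example")

    write_LUT_SonySPI3D(LUT, "example.spi3d")

    return read_LUT_SonySPI3D("example.spi3d")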
import os import re import sys import subprocess import socket import time import shutil import logging import string from functools import lru_cache # Store testdir for safe switch back to directory: testdir = os.path.dirname(os.path.abspath(__file__)) logger = logging.getLogger("util") def make_filename(s): """ Remove all invalid characters from a string for a valid filename. And create a directory if none present. """ folders = ["listings"] parts = list(map(only_valid_chars, s.split("."))) assert parts folders.extend(parts[:-1]) basename = parts[-1] assert basename.startswith("test_") basename = basename[5:] assert basename output_dir = relpath(*folders) if not os.path.exists(output_dir): os.makedirs(output_dir) return os.path.join(output_dir, basename) def only_valid_chars(s): valid_chars = string.ascii_letters + string.digits + "_" return "".join(c for c in s if c in valid_chars) def relpath(*args): return os.path.normpath(os.path.join(testdir, *args)) def source_files(folder, extension): for filename in os.listdir(folder): if filename.endswith(extension): yield os.path.join(folder, filename) qemu_app = "qemu-system-arm" iverilog_app = "iverilog" def tryrm(fn): try: os.remove(fn) except OSError: pass def do_long_tests(arch): """ Determine whether to run samples, these take somewhat longer """ if "LONGTESTS" not in os.environ: return False val = os.environ["LONGTESTS"] if arch in val or val == "all": return True else: return False def do_iverilog(): return "IVERILOG" in os.environ def has_qemu(): """ Determines if qemu is possible """ if hasattr(shutil, "which"): return bool(shutil.which(qemu_app)) else: try: subprocess.check_call([qemu_app, "--version"]) return True except: return False def run_qemu(kernel, machine="lm3s811evb", dump_file=None, dump_range=None): """ Runs qemu on a given kernel file """ if not has_qemu(): return "" # Check bin file exists: assert os.path.isfile(kernel) logger.debug("Running qemu with machine=%s and image %s", machine, kernel) args = [ "qemu-system-arm", "-M", machine, "-m", "16M", "-nographic", "-kernel", kernel, ] return qemu(args) def create_qemu_launch_script(filename, qemu_cmd): """ Create a shell script for a qemu command. This can be used to launch qemu manually for a specific test example. """ # Add additional options for qemu when running from command line: qemu_cmd = qemu_cmd + [ "-serial", "stdio", # route serial port to stdout. # '-S', # Halt on startup # '-gdb', 'tcp::1234', # open gdb port "-D", "trace.txt", "-d", "in_asm,exec,int,op_opt,cpu", # Extensive tracing "-singlestep", # make sure we step a single instruction at a time. ] if "-nographic" in qemu_cmd: qemu_cmd.remove("-nographic") with open(filename, "w") as f: print("#!/bin/bash", file=f) print("", file=f) print("# *** automatically generated QEMU launch file! ***", file=f) print("", file=f) print("{}".format(" ".join(qemu_cmd)), file=f) print("", file=f) if sys.platform == "linux": # chmod +x: import stat st = os.stat(filename) os.chmod(filename, st.st_mode | stat.S_IEXEC) def qemu(args): """ Run qemu with given arguments and capture serial output """ # Listen to the control socket: qemu_control_serve = socket.socket(socket.AF_INET, socket.SOCK_STREAM) qemu_control_serve.bind(("", 0)) # Using 0 as port for autoselect port ctrl_port = qemu_control_serve.getsockname()[1] # Allow a queue of connections, since we start qemu first, then accept # the connection. 
qemu_control_serve.listen(1) # Listen to the serial output: qemu_serial_serve = socket.socket(socket.AF_INET, socket.SOCK_STREAM) qemu_serial_serve.bind(("", 0)) ser_port = qemu_serial_serve.getsockname()[1] qemu_serial_serve.listen(1) logger.debug("Listening on {} for data".format(ser_port)) args = args + [ "-monitor", "tcp:localhost:{}".format(ctrl_port), "-serial", "tcp:localhost:{}".format(ser_port), "-S", ] logger.debug("Starting qemu like this: %s", args) if hasattr(subprocess, "DEVNULL"): qemu_process = subprocess.Popen(args) # stderr=subprocess.DEVNULL) else: # pypy3 has no dev null: qemu_process = subprocess.Popen(args) # qemu_serial Give process some time to boot: qemu_serial_serve.settimeout(5) qemu_control_serve.settimeout(5) qemu_serial, _ = qemu_serial_serve.accept() qemu_control, _ = qemu_control_serve.accept() # Give the go command: qemu_control.send("cont\n".encode("ascii")) qemu_serial.settimeout(1.0) # Receive all data: data = bytearray() while True: try: data_byte = qemu_serial.recv(1) if len(data_byte) == 0: raise RuntimeError("Connection gone loco?") if data_byte == bytes([4]): # EOT (end of transmission) break data += data_byte except socket.timeout: logger.warning("Timeout on socket") break data = data.decode("ascii", errors="ignore") logger.debug("Received {} characters".format(len(data))) # print('data', data) # Send quit command: qemu_control.send("quit\n".encode("ascii")) if hasattr(subprocess, "TimeoutExpired"): try: qemu_process.wait(timeout=5) except subprocess.TimeoutExpired: qemu_process.kill() else: time.sleep(2) qemu_process.kill() qemu_control.close() qemu_serial.close() qemu_control_serve.shutdown(0) qemu_control_serve.close() qemu_serial_serve.shutdown(0) qemu_serial_serve.close() logger.debug("Qemu closed") # Check that output was correct: return data def run_python(kernel): """ Run given file in python and capture output """ python_proc = subprocess.Popen( [sys.executable, kernel], stdout=subprocess.PIPE ) # PYPY hack: if "pypy" in sys.executable: outs, _ = python_proc.communicate() else: outs, _ = python_proc.communicate(timeout=60) outs = outs.decode("ascii", errors="ignore") outs = outs.replace(os.linesep, "\n") return outs def run_nodejs(js_filename): """ Run given file in nodejs and capture output """ proc = subprocess.Popen(["node", js_filename], stdout=subprocess.PIPE) outs, _ = proc.communicate(timeout=10) outs = outs.decode("ascii", errors="ignore") outs = outs.replace(os.linesep, "\n") return outs @lru_cache(maxsize=None) def has_iverilog(): """ Determines if iverilog is installed """ return hasattr(shutil, "which") and bool(shutil.which(iverilog_app)) def run_msp430(pmem): """ Run the given memory file in the openmsp430 iverilog project. 
""" # Make a run file with the same name as the mem file: simv = pmem[:-4] + ".run" if not os.path.exists(simv): print() print("======") print("Compiling verilog!") print("======") # compile msp430 bench for this pmem: workdir = relpath( "..", "examples", "msp430", "test_system", "iverilog" ) cmd = [ "iverilog", "-o", simv, "-c", "args.f", "-D", 'MEM_FILENAME="{}"'.format(pmem), "-D", "SEED=123", ] print(cmd) subprocess.check_call(cmd, stdout=sys.stdout, cwd=workdir) print("======") print("Compiled into", simv) print("======") print("======") print("Running", simv) print("======") # run build vvp simulation: outs = subprocess.check_output([simv]) outs = outs.decode("ascii") print(outs) chars = [] for c in re.finditer("Write serial: ([01]{8})", outs): ch = chr(int(c.group(1), 2)) chars.append(ch) data = "".join(chars) return data def run_picorv32(pmem): """ Run the given memory file in the riscvpicorv32 iverilog project. """ # Make a run file with the same name as the mem file: simv = pmem[:-4] + ".run" if not os.path.exists(simv): print() print("======") print("Compiling verilog!") print("======") # compile picorv32 bench for this pmem: workdir = relpath("..", "examples", "riscvpicorv32", "iverilog") cmd = [ "iverilog", "-o", simv, "-c", "args.f", "-D", 'MEM_FILENAME="{}"'.format(pmem), ] print(cmd) subprocess.check_call(cmd, cwd=workdir) print("======") print("Compiled into", simv) print("======") print("======") print("Running", simv) print("======") # run build vvp simulation: outs = subprocess.check_output([simv], timeout=30) outs = outs.decode("ascii") print(outs) return outs avr_emu1 = relpath("..", "examples", "avr", "emu", "build", "emu1") def has_avr_emulator(): return os.path.exists(avr_emu1) def run_avr(hexfile): """ Run hexfile through avr emulator """ command = [avr_emu1, hexfile] logger.debug("Running %s", command) outs = subprocess.check_output(command, timeout=10) outs = outs.decode("ascii") print(outs) chars = [] for c in re.finditer("uart: ([0-9A-F]+)", outs): ch = chr(int(c.group(1), 16)) chars.append(ch) data = "".join(chars) return data def gnu_assemble(source, as_args=(), prefix="arm-none-eabi-"): """ Helper function to feed source through gnu assembling tools """ prefix = "arm-none-eabi-" gas = "{}as".format(prefix) objdump = prefix + "objdump" print("assembling...") p_as = subprocess.Popen([gas] + list(as_args), stdin=subprocess.PIPE) p_as.communicate(input=source.encode("ascii")) if p_as.returncode != 0: raise Exception("{}".format(p_as.returncode)) p_objdump = subprocess.Popen([objdump, "-d"], stdout=subprocess.PIPE) output = p_objdump.communicate()[0].decode("ascii") if p_objdump.returncode != 0: raise Exception("{}".format(p_objdump.returncode)) print(output) p_objdump = subprocess.Popen( [objdump, "-s", "-j", ".text"], stdout=subprocess.PIPE ) output = p_objdump.communicate()[0].decode("ascii") if p_objdump.returncode != 0: raise Exception("{}".format(p_objdump.returncode)) print(output) return output
#!/usr/bin/env python # Copyright 2015 the V8 project authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import argparse import os import sys import tempfile import urllib2 from common_includes import * class Preparation(Step): MESSAGE = "Preparation." def RunStep(self): self.Git("fetch origin +refs/heads/*:refs/heads/*") self.GitCheckout("origin/master") self.DeleteBranch("work-branch") class PrepareBranchRevision(Step): MESSAGE = "Check from which revision to branch off." def RunStep(self): self["push_hash"] = (self._options.revision or self.GitLog(n=1, format="%H", branch="origin/master")) assert self["push_hash"] print "Release revision %s" % self["push_hash"] class IncrementVersion(Step): MESSAGE = "Increment version number." def RunStep(self): latest_version = self.GetLatestVersion() # The version file on master can be used to bump up major/minor at # branch time. self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch()) self.ReadAndPersistVersion("master_") master_version = self.ArrayToVersion("master_") # Use the highest version from master or from tags to determine the new # version. authoritative_version = sorted( [master_version, latest_version], key=SortingKey)[1] self.StoreVersion(authoritative_version, "authoritative_") # Variables prefixed with 'new_' contain the new version numbers for the # ongoing candidates push. self["new_major"] = self["authoritative_major"] self["new_minor"] = self["authoritative_minor"] self["new_build"] = str(int(self["authoritative_build"]) + 1) # Make sure patch level is 0 in a new push. self["new_patch"] = "0" # The new version is not a candidate. self["new_candidate"] = "0" self["version"] = "%s.%s.%s" % (self["new_major"], self["new_minor"], self["new_build"]) print ("Incremented version to %s" % self["version"]) class DetectLastRelease(Step): MESSAGE = "Detect commit ID of last release base." def RunStep(self): self["last_push_master"] = self.GetLatestReleaseBase() class PrepareChangeLog(Step): MESSAGE = "Prepare raw ChangeLog entry." def RunStep(self): self["date"] = self.GetDate() output = "%s: Version %s\n\n" % (self["date"], self["version"]) TextToFile(output, self.Config("CHANGELOG_ENTRY_FILE")) commits = self.GitLog(format="%H", git_hash="%s..%s" % (self["last_push_master"], self["push_hash"])) # Cache raw commit messages. commit_messages = [ [ self.GitLog(n=1, format="%s", git_hash=commit), self.GitLog(n=1, format="%B", git_hash=commit), self.GitLog(n=1, format="%an", git_hash=commit), ] for commit in commits.splitlines() ] # Auto-format commit messages. body = MakeChangeLogBody(commit_messages, auto_format=True) AppendToFile(body, self.Config("CHANGELOG_ENTRY_FILE")) msg = (" Performance and stability improvements on all platforms." "\n#\n# The change log above is auto-generated. Please review if " "all relevant\n# commit messages from the list below are included." "\n# All lines starting with # will be stripped.\n#\n") AppendToFile(msg, self.Config("CHANGELOG_ENTRY_FILE")) # Include unformatted commit messages as a reference in a comment. comment_body = MakeComment(MakeChangeLogBody(commit_messages)) AppendToFile(comment_body, self.Config("CHANGELOG_ENTRY_FILE")) class EditChangeLog(Step): MESSAGE = "Edit ChangeLog entry." def RunStep(self): print ("Please press <Return> to have your EDITOR open the ChangeLog " "entry, then edit its contents to your liking. When you're done, " "save the file and exit your EDITOR. 
") self.ReadLine(default="") self.Editor(self.Config("CHANGELOG_ENTRY_FILE")) # Strip comments and reformat with correct indentation. changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE")).rstrip() changelog_entry = StripComments(changelog_entry) changelog_entry = "\n".join(map(Fill80, changelog_entry.splitlines())) changelog_entry = changelog_entry.lstrip() if changelog_entry == "": # pragma: no cover self.Die("Empty ChangeLog entry.") # Safe new change log for adding it later to the candidates patch. TextToFile(changelog_entry, self.Config("CHANGELOG_ENTRY_FILE")) class PushBranchRef(Step): MESSAGE = "Create branch ref." def RunStep(self): cmd = "push origin %s:refs/heads/%s" % (self["push_hash"], self["version"]) if self._options.dry_run: print "Dry run. Command:\ngit %s" % cmd else: self.Git(cmd) class MakeBranch(Step): MESSAGE = "Create the branch." def RunStep(self): self.Git("reset --hard origin/master") self.Git("new-branch work-branch --upstream origin/%s" % self["version"]) self.GitCheckoutFile(CHANGELOG_FILE, self["latest_version"]) self.GitCheckoutFile(VERSION_FILE, self["latest_version"]) self.GitCheckoutFile(WATCHLISTS_FILE, self["latest_version"]) class AddChangeLog(Step): MESSAGE = "Add ChangeLog changes to release branch." def RunStep(self): changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE")) old_change_log = FileToText(os.path.join(self.default_cwd, CHANGELOG_FILE)) new_change_log = "%s\n\n\n%s" % (changelog_entry, old_change_log) TextToFile(new_change_log, os.path.join(self.default_cwd, CHANGELOG_FILE)) class SetVersion(Step): MESSAGE = "Set correct version for candidates." def RunStep(self): self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_") class EnableMergeWatchlist(Step): MESSAGE = "Enable watchlist entry for merge notifications." def RunStep(self): old_watchlist_content = FileToText(os.path.join(self.default_cwd, WATCHLISTS_FILE)) new_watchlist_content = re.sub("(# 'v8-merges@googlegroups\.com',)", "'v8-merges@googlegroups.com',", old_watchlist_content) TextToFile(new_watchlist_content, os.path.join(self.default_cwd, WATCHLISTS_FILE)) class CommitBranch(Step): MESSAGE = "Commit version and changelog to new branch." def RunStep(self): # Convert the ChangeLog entry to commit message format. text = FileToText(self.Config("CHANGELOG_ENTRY_FILE")) # Remove date and trailing white space. text = re.sub(r"^%s: " % self["date"], "", text.rstrip()) # Remove indentation and merge paragraphs into single long lines, keeping # empty lines between them. def SplitMapJoin(split_text, fun, join_text): return lambda text: join_text.join(map(fun, text.split(split_text))) text = SplitMapJoin( "\n\n", SplitMapJoin("\n", str.strip, " "), "\n\n")(text) if not text: # pragma: no cover self.Die("Commit message editing failed.") text += "\n\nTBR=%s" % self._options.reviewer self["commit_title"] = text.splitlines()[0] TextToFile(text, self.Config("COMMITMSG_FILE")) self.GitCommit(file_name = self.Config("COMMITMSG_FILE")) os.remove(self.Config("COMMITMSG_FILE")) os.remove(self.Config("CHANGELOG_ENTRY_FILE")) class LandBranch(Step): MESSAGE = "Upload and land changes." def RunStep(self): if self._options.dry_run: print "Dry run - upload CL." else: self.GitUpload(author=self._options.author, force=True, bypass_hooks=True, private=True) cmd = "cl land --bypass-hooks -f" if self._options.dry_run: print "Dry run. Command:\ngit %s" % cmd else: self.Git(cmd) class TagRevision(Step): MESSAGE = "Tag the new revision." 
def RunStep(self): if self._options.dry_run: print ("Dry run. Tagging \"%s\" with %s" % (self["commit_title"], self["version"])) else: self.vc.Tag(self["version"], "origin/%s" % self["version"], self["commit_title"]) class CleanUp(Step): MESSAGE = "Done!" def RunStep(self): print("Congratulations, you have successfully created version %s." % self["version"]) self.GitCheckout("origin/master") self.DeleteBranch("work-branch") self.Git("gc") class CreateRelease(ScriptsBase): def _PrepareOptions(self, parser): group = parser.add_mutually_exclusive_group() group.add_argument("-f", "--force", help="Don't prompt the user.", default=True, action="store_true") group.add_argument("-m", "--manual", help="Prompt the user at every important step.", default=False, action="store_true") parser.add_argument("-R", "--revision", help="The git commit ID to push (defaults to HEAD).") def _ProcessOptions(self, options): # pragma: no cover if not options.author or not options.reviewer: print "Reviewer (-r) and author (-a) are required." return False return True def _Config(self): return { "PERSISTFILE_BASENAME": "/tmp/create-releases-tempfile", "CHANGELOG_ENTRY_FILE": "/tmp/v8-create-releases-tempfile-changelog-entry", "COMMITMSG_FILE": "/tmp/v8-create-releases-tempfile-commitmsg", } def _Steps(self): return [ Preparation, PrepareBranchRevision, IncrementVersion, DetectLastRelease, PrepareChangeLog, EditChangeLog, PushBranchRef, MakeBranch, AddChangeLog, SetVersion, EnableMergeWatchlist, CommitBranch, LandBranch, TagRevision, CleanUp, ] if __name__ == "__main__": # pragma: no cover sys.exit(CreateRelease().Run())
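# Hedged usage note (not part of the upstream script).  The flags below are the
# ones referenced in this file: -a (author) and -r (reviewer) are required by
# _ProcessOptions, -R/--revision is declared in _PrepareOptions, and the
# dry-run behaviour is driven by self._options.dry_run from the shared option
# parser in common_includes.  The script path is assumed for illustration only:
#
#   $ tools/release/create_release.py -a author@chromium.org -r reviewer@chromium.org
#   $ tools/release/create_release.py -a ... -r ... -R <git-hash>   # branch off a given commit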
""" tonality.py - Matrix representations of musical scores, corpara, and their tonality Example: J. S. Bach's "Well Tempered Clavier" Books 1 and 2 2015, Michael A. Casey, Dartmouth College, Bregman Media Labs License: Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) http://creativecommons.org/licenses/by-nc/4.0/ """ import pylab as P import numpy as np import glob, sys, pdb import bregman try: import music21 as m21 except: print "Warning: music21 not installed, only loading .ascii files supported" pc_labels = np.tile(['C','C#','D','Eb','E','F','F#','G','G#','A','Bb','B'],13) def _report(reportStr): print reportStr sys.stdout.flush() def extract_audio_chroma(flist, nSecs = 10, nSamps = 6): """ Given a list of WAV files, extract chromagram features and stack Options: nSecs - how many seconds each sample lasts nSamps - how many samples to take from each file """ F = [] for fname in flist: x,sr,fmt = bregman.sound.wavread(fname) if len(x.shape) > 1: # Check if stereo x = x.mean(1) # Convert stereo to MONO for _ in range(nSamps): start = np.random.randint(x.shape[0] - sr*nSecs) # 10s segments y = x[start:start+nSecs*sr] chrom = bregman.features.Chromagram(y, nfft=8192, wfft=8192, nhop=sr/20) X = chrom.X.T.reshape(nSecs,-1,12).mean(0).T # 1s averaging F.append(X) return np.hstack(F) def load_corpus(corpus=None, idx=None, win_len=1, sample_len=0): """ Load items from a corpus, use given idx slice argument to select subsets Inputs: corpus - list of symbolic music files (xml, mid, krn, etc...) idx - slice argument giving range of works [None] (all) win_len - num tactus beats to integrate [1] (no integration) sample_len - number of sampled windows per work [0] (all) """ if corpus is None or (type(corpus) is str and corpus is not ""): corpus_path=glob.glob(m21.__path__[0]+'/corpus/bach/bwv8[4-9][0-9]') corpus = [] for w in corpus_path: for v in sorted(glob.glob(w+'/*')): corpus.append(v) corpus.sort() print idx _report("slicing work list...") idx = slice(0,len(corpus)) if idx is None else idx _report("parsing corpus...") corpusList = [m21.converter.parse(w) for w in corpus[idx]] _report("extracting notes...") corpusNotes = [_extract_notes_positions_and_durations(w) for w in corpusList] _report("converting notes to matrices...") corpusMtx = [_sample_mtx(_convert_notes_to_matrix(n), win_len, sample_len) for n in corpusNotes] _report("done.") return corpusMtx def _sample_mtx(M, win_len, sample_len): if win_len>1: M = win_mtx(M, win_len) if sample_len>0: M = M[:,np.random.permutation(M.shape[1])[:sample_len]] return M def _extract_notes_positions_and_durations(work): """ Return note positions and durations """ notes = np.array([(nn.midi,n.offset,n.quarterLength) for n in work.flat.notes for nn in n.pitches]) notes = notes[np.where(notes[:,2])] return notes #edit to include manual length and smallest duration # start_t is start time in quarter notes # duration is duration in quarter notes def _convert_notes_to_matrix(notes, start_t=0, duration=128): # start_t and duration offset in quarters """ Given a list of (midi,quarterLength) tuples, collate all notes per tactus tick (smallest duration) and make piano-roll matrix """ smallest_dur = _calc_smallest_dur(notes) #manually calculate if none given start_times = np.array(notes)[:,1] # time_idx = (start_times >= start_t) & (start_times < start_t + duration) notes = np.array(notes).copy()[time_idx] t0 = notes[0,1] N = notes[-1,1] - t0 d = notes[-1,2] Nc = (N+d) / smallest_dur mtx = np.zeros((128,Nc)) for n in notes: 
mtx[n[0],(n[1]-t0)/smallest_dur:(n[1]-t0+n[2])/smallest_dur]=1 return mtx #calculate smallest interval def _calc_smallest_dur(notes): tick = np.array(notes)[:,2].min() return tick def load_wtc(idx=None, win_len=1, sample_len=0): """ Load scores in matrix form in the entire WTC dataset. Inputs: idx - slice argument giving range of works [None] (all) win_len - num tactus beats to integrate [1] (no integration) sample_len - number of sampled windows per work [0] (all) """ flist = sorted(glob.glob('*.ascii')) if idx is not None: if not np.iterable(idx): idx = [idx] else: idx = range(len(flist)) flist = np.array(flist)[idx] if win_len>0: A = [win_mtx(np.loadtxt(fname, dtype='i4'),win_len) for fname in flist] else: A = [np.loadtxt(fname, dtype='i4').mean(1) for fname in flist] if win_len>0 and sample_len>0: AA = [a[:,np.random.permutation(a.shape[1])[:sample_len]] for a in A] else: AA = A return AA def win_mtx(a, win_len=2): """ Options: win_len - window length [2] """ # perform simple integration N = np.ceil(a.shape[1]/float(win_len)) aa = [] for k in np.arange(N-1): aa.append(a[:,k*win_len:(k+1)*win_len].mean(1)) return np.vstack(aa).T def fold_mtx(a): """ Fold piano-roll matrix into single octave beginning with 'C'. """ return a[:120,:].reshape(-1,12,a.shape[1]).mean(0) def dissimilarity_mtx(A): """ Given a piano-roll indicator matrix, construct self-dissimilarity matrix """ D = bregman.distance.euc_normed(A.T,A.T) return D def center_mtx(D): """ Given a dissimilarity or dissonance matrix, center the matrix by subtracting the mean of the rows and columns. For a dissimilarity matrix this operation yields the "scatter matrix". """ H = np.eye(D.shape[0]) - 1.0/D.shape[0] B = np.dot(np.dot(H,-0.5*D),H) return B def dissonance_mtx(A): """ Given a piano-roll indicator matrix, construct pair-wise dissonance matrix """ n = A.shape[1] D = np.zeros((n,n)) for i,a in enumerate(A.T[:-1]): for j,b in enumerate(A.T[i+1:]): D[i,j] = dissonance_fun(np.expand_dims(a+b,1)) def dissonance_fun(A): """ Given a piano-roll indicator matrix representation of a musical work (128 pitches x beats), return the dissonance as a function of beats. Input: A - 128 x beats indicator matrix of MIDI pitch number """ freq_rats = np.arange(1,11) # Harmonic series ratios amps = np.exp(-.5 * freq_rats) # Partial amplitudes F0 = 8.1757989156 # base frequency for MIDI (note 0) diss = [] # List for dissonance values thresh = 1e-3 for beat in A.T: idx = np.where(beat>thresh)[0] if len(idx): freqs, mags = [], [] # lists for frequencies, mags for i in idx: freqs.extend(F0*2**(i/12.0)*freq_rats) mags.extend(amps) freqs = np.array(freqs) mags = np.array(mags) sortIdx = freqs.argsort() d = _dissonance_fun(freqs[sortIdx],mags[sortIdx]) diss.extend([d]) else: diss.extend([-1]) # Null value return np.array(diss) def _dissonance_fun(freqs, amps=None, params=None): """ :: Compute dissonance between partials with center frequencies in freqs, and amplitudes in amps. Based on William Sethares after Plomp and Levelt: default params = (-3.51, -5.75, 0.0207, 19.96, 5, -5, 0.24) default amps, use 1 as amplitude for all partials. 
""" if params == None: params = (-3.51, -5.75, 0.0207, 19.96, 5, -5, 0.24) b1, b2, s1, s2, c1, c2, Dstar = params if amps == None: amps = [1]*len(freqs) f = np.array(freqs) a = np.array(amps) idx = np.argsort(f) f = f[idx] a = a[idx] N = f.size D = 0 for i in range(1, N): Fmin = f[ 0 : N - i ] S = Dstar / ( s1 * Fmin + s2) Fdif = f[ i : N ] - f[ 0 : N - i ] am = a[ i : N ] * a[ 0 : N - i ] Dnew = am * (c1 * np.exp (b1 * S * Fdif) + c2 * np.exp(b2 * S * Fdif)) D += Dnew.sum() return D def plot_mtx(mtx=None, title=None, newfig=False, cbar=True, **kwargs): """ :: static method for plotting a matrix as a time-frequency distribution (audio features) """ if mtx is None or type(mtx) != np.ndarray: raise ValueError('First argument, mtx, must be a array') if newfig: P.figure() dbscale = kwargs.pop('dbscale', False) bels = kwargs.pop('bels',False) norm = kwargs.pop('norm',False) normalize = kwargs.pop('normalize',False) origin=kwargs.pop('origin','lower') aspect=kwargs.pop('aspect','auto') interpolation=kwargs.pop('interpolation','nearest') cmap=kwargs.pop('cmap',P.cm.gray_r) clip=-100. X = scale_mtx(mtx, normalize=normalize, dbscale=dbscale, norm=norm, bels=bels) i_min, i_max = np.where(X.mean(1))[0][[0,-1]] X = X[i_min:i_max+1].copy() if dbscale or bels: if bels: clip/=10. P.imshow(P.clip(X,clip,0),origin=origin, aspect=aspect, interpolation=interpolation, cmap=cmap, **kwargs) else: P.imshow(X,origin=origin, aspect=aspect, interpolation=interpolation, cmap=cmap, **kwargs) if title: P.title(title,fontsize=16) if cbar: P.colorbar() P.yticks(np.arange(0,i_max+1-i_min,3),pc_labels[i_min:i_max+1:3],fontsize=14) P.xlabel('Tactus', fontsize=14) P.ylabel('MIDI Pitch', fontsize=14) P.grid() def scale_mtx(M, normalize=False, dbscale=False, norm=False, bels=False): """ :: Perform mutually-orthogonal scaling operations, otherwise return identity: normalize [False] dbscale [False] norm [False] """ if not (normalize or dbscale or norm or bels): return M else: X = M.copy() # don't alter the original if norm: nz_idx = (X*X).sum(1) > 0 X[nz_idx] = (X[nz_idx].T / np.sqrt((X[nz_idx]*X[nz_idx]).sum(1))).T if normalize: X=X-np.min(X) X=X/np.max(X) if dbscale or bels: X = P.log10(P.clip(X,0.0001,X.max())) if dbscale: X = 20*X return X def hist_mtx(mtx, tstr=''): """ Given a piano-roll matrix, 128 MIDI piches x beats, plot the pitch class histogram """ i_min, i_max = np.where(mtx.mean(1))[0][[0,-1]] P.figure(figsize=(14.5,8)) P.stem(np.arange(i_max+1-i_min),mtx[i_min:i_max+1,:].sum(1)) ttl = 'Note Frequency' if tstr: ttl+=': '+tstr P.title(ttl,fontsize=16) t=P.xticks(np.arange(0,i_max+1-i_min,3),pc_labels[i_min:i_max+1:3],fontsize=14) P.xlabel('Pitch Class', fontsize=14) P.ylabel('Frequency', fontsize=14) ax = P.axis() P.axis(xmin=-0.5) P.grid() if __name__ == "__main__": P.interactive(True) a = np.loadtxt('01.ascii') P.figure() # Plot piano roll: MIDI pitch by beats P.subplot(211) plot_mtx(a, cmap=P.cm.gray_r, cbar=False) P.axis('tight') P.title('WTC 1 "Prelude in C": Piano Roll') # Plot dissonance by (integrated) beats P.subplot(212) win_len=8 # Number of beats to integrate, non-overlapping a = win_mtx(a, win_len) d = dissonance_fun(a) P.plot(np.arange(len(d))*win_len, d,'r',linewidth=1) P.axis('tight') P.title('Dissonance (win_len=%d)'%win_len, fontsize=16)
# -*- coding: utf-8 -*- """ Created on Fri Sep 12 20:41:09 2014 @author: nestor """ import matplotlib.pyplot as plt import scipy as sp from matplotlib.backends.backend_pdf import PdfPages as pp xlabel_names = { 'symbol_size' : r"$Symbol\ Size\ [KB]$", 'symbols' : r"$Symbols$", 'loss_rate' : r"$Loss\ Rate\ [\%]$", 'erased_symbols' : r"$Erased\ Symbols$" } ylabel_names = { 'goodput' : r"$Goodput\ [MB/s]$", 'extra_symbols' : r"$Average\ Extra\ Symbols$" } title_names = { 'symbol_size' : r"$\ Packet\ size\colon\ $", 'symbols' : r"$\ Symbols\colon\ $", 'loss_rate' : r"$\ Loss\ rate\colon\ $", 'type' : r"$\ Device\colon\ $" } label_names = { u'(OpenFEC, 1.0)' : "$OpenFEC-LDPC$", u'(Perpetual, 0.2652)' : "$Kodo-Perpetual,\ w_r =\ 0.2652$", u'(Perpetual, 0.375)' : "$Kodo-Perpetual,\ w_r =\ 0.375$", u'(Perpetual, 0.5303)' : "$Kodo-Perpetual,\ w_r =\ 0.5303$", u'(SparseFullRLNC, 0.3)' : "$Kodo-Sparse\ RLNC,\ d =\ 0.3$", u'(SparseFullRLNC, 0.4)' : "$Kodo-Sparse\ RLNC,\ d =\ 0.4$", u'(SparseFullRLNC, 0.5)' : "$Kodo-Sparse\ RLNC,\ d =\ 0.5$", u'(SparseThread, 0.3)' : "$Kodo-Sparse\ Threading,\ d =\ 0.3$", u'(SparseThread, 0.4)' : "$Kodo-Sparse\ Threading,\ d =\ 0.4$", u'(SparseThread, 0.5)' : "$Kodo-Sparse\ Threading,\ d =\ 0.5$", u'(Thread, 1.0)' : "$Kodo-Threading\ RLNC$", u'(FullRLNC, 1.0)' : "$Kodo-Full\ RLNC$", u'(ISA, 1.0)' : "$ISA-RS$", u'(Jerasure, 1.0)' : "$Jerasure-RS$", } def title_symbol_size(symbol_size): return '$' + str(symbol_size/1000) + '\ KB.$' def title_loss_rate(loss_rate): return '$' + str(int(loss_rate*100)) + '\%.$' def title_device_type(device): return '$' + device.title() + '.$' parameter_functions = {'symbol_size' : title_symbol_size, 'loss_rate' : title_loss_rate, 'type' : title_device_type, } def set_parameter(parameter,parameters_list,keys): if parameter not in parameter_functions.keys(): return '$' + str(keys[parameters_list.index(parameter)]) + '.$' else: return parameter_functions[parameter]( keys[parameters_list.index(parameter)]) def get_plot_title(parameters_list,keys): title = r'' for parameter in parameters_list: title += title_names[parameter] + set_parameter(parameter, parameters_list, keys) return title xscale_arguments = { ('goodput','symbol_size') : ['log',dict(basex=2)], ('goodput','symbols') : ['log',dict(basex=2)], ('goodput','loss_rate') : ['linear',dict()], ('goodput','erased_symbols') : ['linear',dict()], ('extra_symbols','symbols') : ['linear',dict()] } symbol_size_label = { 32000 : r"$32$", 64000 : r"$64$", 128000 : r"$128$", 256000 : r"$256$", 512000 : r"$512$", 1024000 : r"$1024$" } symbols_label = { 8 : r"$8$", 16 : r"$16$", 32 : r"$32$", 64 : r"$64$", 128 : r"$128$", 256 : r"$256$", 512 : r"$512$" } loss_rate_label = { 0.05 : r"$5$", 0.1 : r"$10$", 0.15 : r"$15$", 0.2 : r"$20$", 0.25 : r"$25$", 0.3 : r"$30$" } erased_symbols_label = { } varying_xlabels = {'symbol_size' : symbol_size_label, 'symbols' : symbols_label, 'loss_rate' : loss_rate_label, 'erased_symbols' : erased_symbols_label } pltkind = { ('goodput','symbol_size') : 'line', ('goodput','symbols') : 'line', ('goodput','loss_rate') : 'line', ('goodput','erased_symbols') : 'bar', ('extra_symbols','symbols') : 'bar' } def set_axis_properties(p,metric,varying_parameter,group): #Set major x-axis label plt.xlabel(xlabel_names[varying_parameter]) #Set x-axis scale xscale_args = xscale_arguments[(metric,varying_parameter)] plt.xscale(xscale_args[0],**xscale_args[1]) #Set x-axis tick labels #Get tick values ticks = list(sp.unique(group[varying_parameter])) #If an item is not in the tick dictionary 
for the bar plot, add it if pltkind[(metric,varying_parameter)] is 'bar': for item in ticks: if item not in varying_xlabels[varying_parameter].keys(): varying_xlabels[varying_parameter][item] = '$' + str(item) +'$' xlabels = [ varying_xlabels[varying_parameter][item] for item in ticks] if pltkind[(metric,varying_parameter)] is 'bar': p.set_xticks(sp.arange(len(ticks))+0.5) plt.setp(p.set_xticklabels(xlabels), rotation=0) else: plt.xticks(ticks,xlabels) plt.ylabel(ylabel_names[metric]) plt.grid('on') def set_plot_legend(p,metric,varying_parameter): lines, labels = p.get_legend_handles_labels() if pltkind[(metric,varying_parameter)] is 'line': for l in lines: l.set_marker(markers[lines.index(l)]) for lb in labels: labels[labels.index(lb)] = label_names[lb] p.legend(lines,labels,loc='center left',ncol=1,fontsize='small', bbox_to_anchor=(1, 0.5)) def get_filename(metric,varying_parameter,fixed_parameters,keys,density): fixed_values = "" for fixed in fixed_parameters: fixed_values += "_" + fixed + "_" + \ str(keys[fixed_parameters.index(fixed)]) filename = density + "_" + metric + "_" + varying_parameter + \ fixed_values + ".pdf" return filename def plot_metric(df,metric,varying_parameter,fixed_parameters,cases,density): df_group = df.groupby(by=fixed_parameters) all_figures_filename = "all_" + density + "_" + metric + "_vs_" + \ varying_parameter + ".pdf" pdf = pp(all_figures_filename) for keys, group in df_group: p = group.pivot_table(metric,cols=cases,rows=varying_parameter).plot( kind=pltkind[(metric,varying_parameter)]) plt.title(get_plot_title(fixed_parameters,keys),fontsize=font_size) set_axis_properties(p,metric,varying_parameter,group) set_plot_legend(p,metric,varying_parameter) filename = get_filename(metric,varying_parameter, fixed_parameters,keys,density) plt.savefig(filename,bbox_inches='tight') pdf.savefig(bbox_inches='tight') plt.close() pdf.close() ######################## PLOTTING SETTINGS ################################ font_size=14 font = {'family' : 'sans-serif', 'weight' : 'medium', 'style' : 'normal', 'size' : font_size} plt.rc('font', **font) plt.rc('text', usetex=True) plt.rc('xtick',labelsize=font_size) plt.rc('ytick',labelsize=font_size) colors = ['SteelBlue','DarkBlue', 'LimeGreen','DarkGreen', 'Crimson','DarkRed', 'Brown','Black'] plt.rc('axes', color_cycle = colors) markers = ["d","+","x","^","v","s","*","h"]
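# --- hedged usage sketch, not part of the original script --------------------
# plot_metric() expects a pandas DataFrame whose columns include the metric
# ('goodput' or 'extra_symbols'), the varying parameter and the fixed
# parameters named in the dictionaries above, plus the columns that identify
# each curve (the 'cases' argument).  The column names 'algorithm' and
# 'density' and the input file 'benchmarks.csv' are hypothetical.  Note that
# plot_metric() relies on the old pivot_table(cols=..., rows=...) keywords, so
# it needs a correspondingly old pandas release.
def example_plot_goodput(csv_path='benchmarks.csv'):
    import pandas as pd
    df = pd.read_csv(csv_path)
    plot_metric(df,
                metric='goodput',
                varying_parameter='symbols',
                fixed_parameters=['symbol_size', 'loss_rate', 'type'],
                cases=['algorithm', 'density'],
                density='dense')          # only used when building output file names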
""" This file contain the viewsets for the course, offering, courseinfo and CourseHistory models. Functionality Provided: Course: + Add - (IsDeveloper) + Update, PartialUpdate, Delete - (IsCourseDeveloper) + List - (everyone allowed) + Retrieve - (IsRegisteredOrAnyInstructor) - add_page - (IsOwner) - pages - (IsRegisteredOrAnyInstructor) - reorder_pages - (IsOwner) - courseInfo - (None) - pending_students, approve - (IsOwner) - register - (None) - deregister - (None) - progress - (IsRegistered) - get_all_marks - (IsOwner) - groups - (IsRegisteredOrAnyInstructor) - add_group - (IsOwner) - reorder_groups - (IsOwner) Offering: + Add: (IsInstructor) + Update, PartialUpdate, Delete: (IsCourseInstructor) + List: (None) + Retrieve: (IsRegistered) - get_shortlisted_courses - (IsOwner) - shortlist_course - (IsOwner) """ from django.shortcuts import get_object_or_404 from django.contrib.auth.models import User from courseware.models import Course, Offering, CourseInfo, \ CourseHistory, Group, ConceptHistory from courseware.vsserializers.course import CourseSerializer, \ CourseHistorySerializer, OfferingSerializer, CourseInfoSerializer from courseware.serializers import GroupSerializer, AddGroupSerializer, \ ConceptHistorySerializer from courseware import playlist from courseware.permissions import IsInstructorOrReadOnly, IsRegistered, \ IsContentDeveloperOrReadOnly, IsOwnerOrReadOnly, IsOwner from rest_framework import mixins, status, viewsets from rest_framework.response import Response from rest_framework.decorators import link, action from rest_framework.permissions import AllowAny from discussion_forum.models import UserSetting from document.models import Document from document.serializers import DocumentSerializer from discussion_forum.models import Tag from elearning_academy.permissions import get_mode import ast class CourseViewSet(viewsets.ModelViewSet): """ ViewSet for Course Class """ model = Course serializer_class = CourseSerializer permission_classes = [IsContentDeveloperOrReadOnly] def get_queryset(self): """ Optionally restricts the returned courses to a given category. 
""" queryset = Course.objects.all() category = self.request.QUERY_PARAMS.get('category', None) if category is not None: queryset = queryset.filter(category__id=category) return queryset def list(self, request): """ List all the courses for q queryset """ mode = get_mode(request) queryset = Course.objects.all() category = self.request.QUERY_PARAMS.get('category', None) if category is not None: queryset = queryset.filter(category__id=category) if mode == 'I': queryset = queryset.filter(type='T') elif mode == 'C': queryset = queryset.filter() else: queryset = queryset.filter( type='O', course_info__is_published=True) serializer = CourseSerializer(queryset) response = { "count": len(serializer.data), "next": "null", "previous": "null", "results": serializer.data } return Response(response) def create(self, request, *args): """ Function for creating a course in a category """ serializer = CourseSerializer(data=request.DATA, files=request.FILES) if serializer.is_valid(): serializer.save() coursehistory = CourseHistory( user=request.user, course=serializer.object, active='A', is_owner=True ) coursehistory.save() # Usersetting for the discussion forum usersetting = UserSetting( user=request.user, forum=serializer.object.forum, super_user=True, moderator=True, badge='IN') usersetting.save() # send for approval now return Response(serializer.data) else: content = serializer.errors return Response(content, status.HTTP_400_BAD_REQUEST) @action(methods=['POST'], permission_classes=((IsOwnerOrReadOnly,)), serializer_class=DocumentSerializer) def add_page(self, request, pk=None): _course = get_object_or_404(Course, pk=pk) self.check_object_permissions(request, _course) serializer = DocumentSerializer(data=request.DATA) if serializer.is_valid(): document = Document( title=serializer.data['title'], is_heading=True, description=serializer.data['description'] ) document.save() _course.pages.add(document) _course.page_playlist = playlist.append( document.id, _course.page_playlist) _course.save() return Response(DocumentSerializer(document).data) else: content = serializer.errors return Response(content, status.HTTP_400_BAD_REQUEST) @link(permission_classes=((IsOwnerOrReadOnly,))) def pages(self, request, pk=None): _course = get_object_or_404(Course, pk=pk) self.check_object_permissions(request, _course) pages = _course.pages.all() serializer = DocumentSerializer(pages, many=True) page_playlist = playlist.to_array(_course.page_playlist) N = len(page_playlist) ordered_data = [""] * N for i in range(N): ordered_data[i] = serializer.data[page_playlist[i][1]] return Response(ordered_data) @action( methods=['PATCH'], permission_classes=((IsOwnerOrReadOnly,)), serializer_class=CourseSerializer) def reorder_pages(self, request, pk=None): _course = get_object_or_404(Course, pk=pk) self.check_object_permissions(request, _course) print request.DATA myplaylist = request.DATA['playlist'] print myplaylist newplaylist = playlist.is_valid(myplaylist, _course.page_playlist) if newplaylist is not False: _course.page_playlist = newplaylist _course.save() return Response(_course.page_playlist) else: content = "Given order data does not have the correct format" return Response(content, status.HTTP_404_NOT_FOUND) @link() def courseInfo(self, request, pk=None): """ Function to get courseinfo, anyone can access courseinfo as it is public. TODO: to add permission to avoid giving info to for a unpublished course. 
""" _course = get_object_or_404(Course, pk=pk) #self.check_object_permissions(request, _course) courseinfo = CourseInfo.objects.get(CourseInfo_Course=_course) serializer = CourseInfoSerializer(courseinfo) return Response(serializer.data) @action(methods=['POST'], permission_classes=((IsOwner, ))) def publish(self, request, pk=None): """ Publish a course so that students can see it for enrollment """ course = get_object_or_404(Course, pk=pk) self.check_object_permissions(request, course) courseinfo = course.course_info if courseinfo.is_published: return Response({"error": "Cannot unpublish course"}, status.HTTP_400_BAD_REQUEST) courseinfo.is_published = True courseinfo.save() return Response({"msg": "Published Course"}, status.HTTP_200_OK) @link(permission_classes=((IsOwner, ))) def approved_students(self, request, pk=None): """ List approved students in the course """ _course = get_object_or_404(Course, pk=pk) if (_course.type == 'T'): return Response({'response': False, 'students': []}) self.check_object_permissions(request, _course) students = CourseHistory.objects.filter(course=_course, active='A') returned_data = [] for student in students: user = User.objects.get(pk=student.user.id) returned_data.append({ "user": user.id, "username": user.username, "fullname": user.get_full_name(), "email": user.email }) return Response({'response': True, 'students': returned_data}) @link(permission_classes=((IsOwner, ))) def pending_students(self, request, pk=None): """ List the students which are pending approval in the course """ _course = get_object_or_404(Course, pk=pk) if (_course.type == 'T'): return Response({'response': False, 'students': []}) self.check_object_permissions(request, _course) students = CourseHistory.objects.filter(course=_course, active='P') returned_data = [] for student in students: user = User.objects.get(pk=student.user.id) returned_data.append({ "user": user.id, "username": user.username, "fullname": user.get_full_name(), "email": user.email, }) return Response({'response': True, 'students': returned_data}) @action(methods=['POST'], permission_classes=((IsOwner, ))) def approve(self, request, pk=None): """ This function takes a list of student ids and approve their request to register """ _course = get_object_or_404(Course, pk=pk, type='O') self.check_object_permissions(request, _course) if 'students' in request.DATA: # converting a string representation of array to students = ast.literal_eval(request.DATA['students']) for student in students: try: coursehistory = CourseHistory.objects.get( course=_course, user=student, active='P' ) coursehistory.active = 'A' coursehistory.save() except: continue return Response({"msg": 'Success'}, status.HTTP_200_OK) else: print request.DATA return Response({"error": "Bad request format"}, status.HTTP_400_BAD_REQUEST) @link() def register(self, request, pk=None): """ Register a student to a course. """ course = get_object_or_404(Course, pk=pk, type='O') try: coursehistory = CourseHistory.objects.get( course=course, user=request.user, ) if coursehistory.active == 'U': coursehistory.active = 'A' coursehistory.save() # TODO: shift the usersetting to the approve function usersetting = UserSetting.objects.filter( user=request.user, forum=course.forum) if len(usersetting) > 0: usersetting = usersetting[0] usersetting.is_active = True usersetting.save() return Response("Successfully registered", status.HTTP_202_ACCEPTED) else: return Response( "Your approval is pending. 
Please contact instructor for your approval", status.HTTP_400_BAD_REQUEST ) except: coursehistory = CourseHistory( course=course, user=request.user, active='A' ) if course.enrollment_type == 'M': coursehistory.active = 'P' coursehistory.save() usersetting = UserSetting(user=request.user, forum=course.forum) usersetting.save() return Response("Successfully registered", status.HTTP_202_ACCEPTED) # TODO : should be action and not link # TODO : register and deregister should be sent to OfferingViewSet # TODO : user active should also be made choice field @link() def deregister(self, request, pk=None): course = get_object_or_404(Course, pk=pk, type='O') try: coursehistory = CourseHistory.objects.get( course=course, user=request.user, # an owner cannot deregister himself from the course is_owner=False ) if coursehistory.active != 'U': coursehistory.active = 'U' coursehistory.save() usersetting = UserSetting.objects.filter( user=request.user, forum=course.forum) if len(usersetting) > 0: usersetting = usersetting[0] usersetting.is_active = False usersetting.save() return Response(CourseSerializer(course).data) except: error = 'You were not registered in this course.' return Response(error) @link(permission_classes=((IsRegistered, ))) def progress(self, request, pk=None): """ Function to get marks in a course """ _course = get_object_or_404(Course, pk=pk) self.check_object_permissions(request, _course) history = get_object_or_404( CourseHistory, course=_course, user=request.user ) return Response(history.progress()) @link(permission_classes=((IsRegistered,))) def get_all_public_marks(self, request, pk=None): """ Function to get marks of all the students for whom show_marks is true """ _course = get_object_or_404(Course, pk=pk) self.check_object_permissions(request, _course) histories = CourseHistory.objects.filter( course=_course, is_owner=False, show_marks=True).order_by('score').reverse() data = {} data['students'] = [] i = 0 for history in histories: data['students'].append({}) data['students'][i]['score'] = history.score data['students'][i]['max_score'] = history.course.max_score data['students'][i]['user'] = history.user.username data['students'][i]['id'] = history.user.id data['students'][i]['name'] = history.user.get_full_name() i += 1 return Response(data) @link(permission_classes=((IsRegistered,))) def get_all_public_marks_student(self, request, pk=None): """ Function to get marks of a student for whom show_marks is true """ _course = get_object_or_404(Course, pk=pk) studentID = self.request.QUERY_PARAMS.get('student', None) _student = get_object_or_404(User, pk=studentID) self.check_object_permissions(request, _course) history = CourseHistory.objects.get( course=_course, user=_student, is_owner=False, show_marks=True) data = {} data = (history.progress()) data['user'] = history.user.username data['id'] = history.user.id data['name'] = history.user.get_full_name() return Response(data) @link(permission_classes=((IsOwner,))) def get_all_marks(self, request, pk=None): """ Function to get marks of all the students """ _course = get_object_or_404(Course, pk=pk) self.check_object_permissions(request, _course) histories = CourseHistory.objects.filter( course=_course, is_owner=False).order_by('score').reverse() data = {} data['students'] = [] i = 0 for history in histories: data['students'].append({}) data['students'][i]['score'] = history.score data['students'][i]['max_score'] = history.course.max_score data['students'][i]['user'] = history.user.username data['students'][i]['id'] = 
history.user.id data['students'][i]['name'] = history.user.get_full_name() i += 1 return Response(data) @link(permission_classes=((IsOwner,))) def get_all_marks_student(self, request, pk=None): """ Function to get marks of a student """ _course = get_object_or_404(Course, pk=pk) studentID = self.request.QUERY_PARAMS.get('student', None) _student = get_object_or_404(User, pk=studentID) self.check_object_permissions(request, _course) history = CourseHistory.objects.get( course=_course, user=_student, is_owner=False) data = {} data = (history.progress()) data['user'] = history.user.username data['id'] = history.user.id data['name'] = history.user.get_full_name() return Response(data) @link(permission_classes=((IsOwnerOrReadOnly, ))) def groups(self, request, pk=None): """ Function to get all the groups in a course """ _course = get_object_or_404(Course, pk=pk) self.check_object_permissions(request, _course) _groups = Group.objects.filter(course=_course) serializer = GroupSerializer(_groups, many=True) _playlist = playlist.to_array(_course.playlist) N = len(_playlist) ordered_data = [""] * N for i in range(N): ordered_data[i] = serializer.data[_playlist[i][1]] return Response(ordered_data) @action( methods=['POST'], permission_classes=((IsOwnerOrReadOnly,)), serializer_class=AddGroupSerializer) def add_group(self, request, pk=None): _course = get_object_or_404(Course, pk=pk) self.check_object_permissions(request, _course) serializer = AddGroupSerializer(data=request.DATA) if serializer.is_valid(): if request.FILES == {}: group = Group( course=_course, title=serializer.data['title'], description=serializer.data['description'] ) else: group = Group( course=_course, title=serializer.data['title'], description=serializer.data['description'], image=request.FILES['image'] ) group.save() _course.playlist = playlist.append(group.id, _course.playlist) _course.save() return Response(GroupSerializer(group).data) else: content = serializer.errors return Response(content, status.HTTP_400_BAD_REQUEST) @action( methods=['PATCH'], permission_classes=((IsOwnerOrReadOnly,)), serializer_class=CourseSerializer) def reorder_groups(self, request, pk=None): _course = get_object_or_404(Course, pk=pk) self.check_object_permissions(request, _course) myplaylist = request.DATA['playlist'] newplaylist = playlist.is_valid(myplaylist, _course.playlist) if newplaylist is not False: _course.playlist = newplaylist _course.save() return Response(_course.playlist) else: content = "Given order data does not have the correct format" return Response(content, status.HTTP_404_NOT_FOUND) @action(methods=['PATCH'], permission_classes=((IsRegistered,))) def set_marks_setting(self, request, pk=None): _course = get_object_or_404(Course, pk=pk) history = CourseHistory.objects.get( course=_course, user=self.request.user) show_marks = self.request.QUERY_PARAMS.get('show', None) if(show_marks == 'true'): history.show_marks = True else: history.show_marks = False history.save() return Response(history.show_marks) @link(permission_classes=((IsRegistered,))) def get_marks_setting(self, request, pk=None): _course = get_object_or_404(Course, pk=pk) history = CourseHistory.objects.get( course=_course, user=self.request.user) return Response(history.show_marks) class OfferingViewSet(viewsets.ModelViewSet): """ ViewSet for model Offering """ model = Offering serializer_class = OfferingSerializer permission_classes = [IsInstructorOrReadOnly] def get_queryset(self): """ Optionally restricts the returned courses to a given category. 
""" queryset = Offering.objects.all() category = self.request.QUERY_PARAMS.get('category', None) if category is not None: queryset = queryset.filter(category__id=category) return queryset def create(self, request, pk=None, *args): """ Function for creating an offering in a category """ serializer = OfferingSerializer(data=request.DATA) if serializer.is_valid(): serializer.save() coursehistory = CourseHistory( user=request.user, course=serializer.object, active='A', is_owner=True ) coursehistory.save() # Usersetting for the discussion forum usersetting = UserSetting( user=request.user, forum=serializer.object.forum, super_user=True, moderator=True, badge='IN') usersetting.save() # send for approval now ## Create a 'General' tag for each course tag = Tag(forum=serializer.object.forum) tag.tag_name = 'General' tag.title = 'General' tag.save() return Response(serializer.data) else: content = serializer.errors return Response(content, status.HTTP_400_BAD_REQUEST) @link(permission_classes=((IsOwner, ))) def get_shortlisted_courses(self, request, pk=None): mycourse = get_object_or_404(Offering, pk=pk) self.check_object_permissions(request, mycourse) shortlist = mycourse.shortlisted_courses.all() serializer = CourseSerializer(shortlist, many=True) return Response(serializer.data) @action(methods=['POST'], permission_classes=((IsOwner, ))) def shortlist_course(self, request, pk=None): mycourse = get_object_or_404(Offering, pk=pk) self.check_object_permissions(request, mycourse) try: _course = Course.objects.get(pk=request.DATA['id'], type='T') except: return Response("Bad Request: No such textbook", status.HTTP_400_BAD_REQUEST) mycourse.shortlisted_courses.add(_course) return Response("Successfully shortlisted the course", status.HTTP_202_ACCEPTED) class CourseHistoryViewSet(mixins.UpdateModelMixin, viewsets.GenericViewSet): """ ViewSet for CourseHistory. Only gives Update option """ model = CourseHistory serializer_class = CourseHistorySerializer permission_classes = [IsInstructorOrReadOnly] @link(permission_classes=((IsRegistered, ))) def concept_history(self, request, pk=None): coursehistory = get_object_or_404(CourseHistory, pk=pk) self.check_object_permissions(request, coursehistory.course) history = ConceptHistory.objects.filter( user=request.user, course_history=coursehistory) serializer = ConceptHistorySerializer(history, many=True) return Response(serializer.data) class CourseInfoViewSet(viewsets.ModelViewSet): queryset = CourseInfo.objects.all() serializer_class = CourseInfoSerializer
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, with_statement from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine from tornado.routing import HostMatches, PathMatches, ReversibleRouter, Router, Rule, RuleRouter from tornado.testing import AsyncHTTPTestCase from tornado.web import Application, HTTPError, RequestHandler from tornado.wsgi import WSGIContainer class BasicRouter(Router): def find_handler(self, request, **kwargs): class MessageDelegate(HTTPMessageDelegate): def __init__(self, connection): self.connection = connection def finish(self): self.connection.write_headers( ResponseStartLine("HTTP/1.1", 200, "OK"), HTTPHeaders({"Content-Length": "2"}), b"OK" ) self.connection.finish() return MessageDelegate(request.connection) class BasicRouterTestCase(AsyncHTTPTestCase): def get_app(self): return BasicRouter() def test_basic_router(self): response = self.fetch("/any_request") self.assertEqual(response.body, b"OK") resources = {} class GetResource(RequestHandler): def get(self, path): if path not in resources: raise HTTPError(404) self.finish(resources[path]) class PostResource(RequestHandler): def post(self, path): resources[path] = self.request.body class HTTPMethodRouter(Router): def __init__(self, app): self.app = app def find_handler(self, request, **kwargs): handler = GetResource if request.method == "GET" else PostResource return self.app.get_handler_delegate(request, handler, path_args=[request.path]) class HTTPMethodRouterTestCase(AsyncHTTPTestCase): def get_app(self): return HTTPMethodRouter(Application()) def test_http_method_router(self): response = self.fetch("/post_resource", method="POST", body="data") self.assertEqual(response.code, 200) response = self.fetch("/get_resource") self.assertEqual(response.code, 404) response = self.fetch("/post_resource") self.assertEqual(response.code, 200) self.assertEqual(response.body, b"data") def _get_named_handler(handler_name): class Handler(RequestHandler): def get(self, *args, **kwargs): if self.application.settings.get("app_name") is not None: self.write(self.application.settings["app_name"] + ": ") self.finish(handler_name + ": " + self.reverse_url(handler_name)) return Handler FirstHandler = _get_named_handler("first_handler") SecondHandler = _get_named_handler("second_handler") class CustomRouter(ReversibleRouter): def __init__(self): super(CustomRouter, self).__init__() self.routes = {} def add_routes(self, routes): self.routes.update(routes) def find_handler(self, request, **kwargs): if request.path in self.routes: app, handler = self.routes[request.path] return app.get_handler_delegate(request, handler) def reverse_url(self, name, *args): handler_path = '/' + name return handler_path if handler_path in self.routes else None class CustomRouterTestCase(AsyncHTTPTestCase): def get_app(self): class CustomApplication(Application): def reverse_url(self, name, *args): return router.reverse_url(name, *args) router = 
CustomRouter() app1 = CustomApplication(app_name="app1") app2 = CustomApplication(app_name="app2") router.add_routes({ "/first_handler": (app1, FirstHandler), "/second_handler": (app2, SecondHandler), "/first_handler_second_app": (app2, FirstHandler), }) return router def test_custom_router(self): response = self.fetch("/first_handler") self.assertEqual(response.body, b"app1: first_handler: /first_handler") response = self.fetch("/second_handler") self.assertEqual(response.body, b"app2: second_handler: /second_handler") response = self.fetch("/first_handler_second_app") self.assertEqual(response.body, b"app2: first_handler: /first_handler") class ConnectionDelegate(HTTPServerConnectionDelegate): def start_request(self, server_conn, request_conn): class MessageDelegate(HTTPMessageDelegate): def __init__(self, connection): self.connection = connection def finish(self): response_body = b"OK" self.connection.write_headers( ResponseStartLine("HTTP/1.1", 200, "OK"), HTTPHeaders({"Content-Length": str(len(response_body))})) self.connection.write(response_body) self.connection.finish() return MessageDelegate(request_conn) class RuleRouterTest(AsyncHTTPTestCase): def get_app(self): app = Application() def request_callable(request): request.write(b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nOK") request.finish() app.add_handlers(".*", [ (HostMatches("www.example.com"), [ (PathMatches("/first_handler"), "tornado.test.routing_test.SecondHandler", {}, "second_handler") ]), Rule(PathMatches("/first_handler"), FirstHandler, name="first_handler"), Rule(PathMatches("/request_callable"), request_callable), ("/connection_delegate", ConnectionDelegate()) ]) return app def test_rule_based_router(self): response = self.fetch("/first_handler") self.assertEqual(response.body, b"first_handler: /first_handler") response = self.fetch("/first_handler", headers={'Host': 'www.example.com'}) self.assertEqual(response.body, b"second_handler: /first_handler") response = self.fetch("/connection_delegate") self.assertEqual(response.body, b"OK") response = self.fetch("/request_callable") self.assertEqual(response.body, b"OK") response = self.fetch("/404") self.assertEqual(response.code, 404) class WSGIContainerTestCase(AsyncHTTPTestCase): def get_app(self): wsgi_app = WSGIContainer(self.wsgi_app) class Handler(RequestHandler): def get(self, *args, **kwargs): self.finish(self.reverse_url("tornado")) return RuleRouter([ (PathMatches("/tornado.*"), Application([(r"/tornado/test", Handler, {}, "tornado")])), (PathMatches("/wsgi"), wsgi_app), ]) def wsgi_app(self, environ, start_response): start_response("200 OK", []) return [b"WSGI"] def test_wsgi_container(self): response = self.fetch("/tornado/test") self.assertEqual(response.body, b"/tornado/test") response = self.fetch("/wsgi") self.assertEqual(response.body, b"WSGI")
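# --- hedged example, not part of the test suite ------------------------------
# Since Tornado 4.5 a Router is itself an HTTPServerConnectionDelegate, so any
# of the routers above can be served directly; AsyncHTTPTestCase does the same
# thing internally for the test cases in this file.  Port 8888 is arbitrary.
if __name__ == "__main__":
    from tornado.httpserver import HTTPServer
    from tornado.ioloop import IOLoop

    server = HTTPServer(BasicRouter())   # answers "OK" to every request
    server.listen(8888)
    IOLoop.current().start()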
import random import operator import copy from collections import Counter class Env(object): directions = [[1, -1], [1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1]] empty = 'empty' @classmethod def flatten_list(cls, ls): l = [] for el in ls: if isinstance(el, list): [l.append(e) for e in Env.flatten_list(el)] else: l.append(el) return l @classmethod def build_uid(cls, col, row): return 'col'+str(col)+'row'+str(row) @classmethod def get_uid_by_direction(cls, tile, direction): return cls.build_uid(tile.col + direction[0], tile.row + direction[1]) @classmethod def can_occupy_tile(cls, tile): if tile: if tile.is_empty(): can_occupy = True else: can_occupy = False else: can_occupy = True return can_occupy class Tile: def __init__(self, label=Env.empty, preference=0., dif_phob=0.5, d_weight=0): self.label = label self.preference = preference self.dif_phob = 1 - dif_phob self.d_weight = d_weight self.col = None self.row = None self.neighbors = Counter() def set_position(self, col, row): self.col = col self.row = row def get_uid(self): return Env.build_uid(self.col, self.row) def is_empty(self): return True if self.label == Env.empty else False def add_neighbour(self, neighbour): self.neighbors.update([neighbour.label]) def clear_neighbors(self): self.neighbors = Counter() def wants_to_move(self): if self.is_empty(): return None no_of_neighbours = sum(self.neighbors.values()) percentage_similar = self.get_similar_to()/float(no_of_neighbours) percentage_different = self.get_different_to()/float(no_of_neighbours) if (percentage_similar <= self.preference or percentage_different >= self.dif_phob): return True else: return False def can_move_to(self, taken_spots): chosen_tile = None neighbors = {} for direction in Env.directions: t_uid = Env.get_uid_by_direction(self, direction) tile = taken_spots.get(t_uid, None) if tile and tile.is_empty(): neighbors[tile] = ((tile.get_similar_to(self.label) - 1) + (len(Env.directions) - tile.get_different_to(self.label))) try: max_tile = max(neighbors.iteritems(), key=operator.itemgetter(1))[0] if neighbors[max_tile] > (self.get_similar_to() + (len(Env.directions) - self.get_different_to())): chosen_tile = max_tile except ValueError: pass return chosen_tile def get_different_to(self, label=None): total = 0 label = self.label if not label else label for n in self.neighbors: if n != label and n != Env.empty: total += self.neighbors[n] return total def get_similar_to(self, l=None): return self.neighbors.get(l if l else self.label, 0) class World(object): def __init__(self, cols, rows, tiles): self.cols = cols self.rows = rows self.tiles_bucket = Env.flatten_list([[tile] * tile.d_weight for tile in tiles]) self.tiles = tiles self.tilemap = {} for row in range(rows): for col in range(cols): uid = Env.build_uid(col, row) tile_type = random.choice(self.tiles_bucket) tile = Tile(tile_type.label, tile_type.preference) tile.set_position(col, row) self.tilemap[uid] = tile self.update_tiles() def run_iteration(self): self.random_process_movement() self.update_tiles() def update_tiles(self): for tile in self.tilemap.values(): self.update_negibors(tile) def random_process_movement(self): temp_map = copy.deepcopy(self.tilemap) while len(self.tilemap) > 0: tile = self.tilemap.pop(random.choice(self.tilemap.keys())) if tile.is_empty(): continue if tile.wants_to_move(): to_tile = tile.can_move_to(temp_map) if to_tile: new_tile, new_uid = self.get_new_tile(tile, to_tile) old_uid = tile.get_uid() temp_map[old_uid] = self.get_empty_tile(tile) else: new_tile, new_uid = 
self.get_new_tile(tile, tile) temp_map[new_uid] = new_tile else: new_tile, new_uid = self.get_new_tile(tile, tile) temp_map[new_uid] = new_tile self.tilemap = temp_map def get_empty_tile(self, tile): empty_tile = Tile() empty_tile.set_position(tile.col, tile.row) return empty_tile def get_new_tile(self, old_tile, to_tile): new_tile = Tile(old_tile.label, old_tile.preference, old_tile.dif_phob) new_tile.set_position(to_tile.col, to_tile.row) new_uid = new_tile.get_uid() return new_tile, new_uid def update_negibors(self, tile): tile.clear_neighbors() for direction in Env.directions: uid = Env.get_uid_by_direction(tile, direction) n = self.tilemap.get(uid, None) if n: tile.add_neighbour(n) def show_world(self): for row in range(self.rows): for col in range(self.cols): uid = Env.build_uid(col, row) t = self.tilemap[uid] print(t.label), print() """ # usage # create a base tile for each type, also add the empty tile # d_weight represents the proportion of a specific tile type during map init a1 = Tile(label='red', preference=0.5, dif_phob=0., d_weight=2) a2 = Tile(label='blue', preference=0.5, dif_phob=0., d_weight=2) e = Tile(label=Env.empty, d_weight=1) base_tiles = [a2, a1, e] w = World(7, 7, base_tiles) w.show_world() w.run_iteration() print '+' * 60 w.show_world() w.run_iteration() print '+' * 60 w.show_world() """
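# --- hedged helper sketch, not part of the original module -------------------
# Runs a world for a fixed number of iterations and reports, per label, the
# average fraction of occupied neighbours that share that label -- a rough
# segregation measure built only from the Tile.neighbors counters above.
def example_segregation_measure(world, iterations=10):
    for _ in range(iterations):
        world.run_iteration()
    per_label = {}
    for tile in world.tilemap.values():
        if tile.is_empty():
            continue
        occupied = sum(count for label, count in tile.neighbors.items()
                       if label != Env.empty)
        if occupied:
            per_label.setdefault(tile.label, []).append(
                tile.get_similar_to() / float(occupied))
    return dict((label, sum(vals) / len(vals))
                for label, vals in per_label.items())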
import collections import functools import json import os import sys import time import traceback import uuid from django import http from django.core.exceptions import PermissionDenied from django.core.files.storage import default_storage as storage from django.conf import settings from django import forms as django_forms from django.db import transaction from django.db.models import Count from django.shortcuts import get_object_or_404, redirect, render from django.utils.http import urlquote from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_exempt import commonware.log from PIL import Image from session_csrf import anonymous_csrf from tower import ugettext_lazy as _lazy, ugettext as _ import waffle from waffle.decorators import waffle_switch from applications.models import Application, AppVersion import amo import amo.utils from amo import messages from amo.decorators import json_view, login_required, post_required from amo.helpers import absolutify, loc, urlparams from amo.utils import escape_all, HttpResponseSendFile, MenuItem from amo.urlresolvers import reverse from access import acl from addons import forms as addon_forms from addons.decorators import addon_view from addons.models import Addon, AddonUser from addons.views import BaseFilter from devhub.decorators import dev_required from devhub.forms import CheckCompatibilityForm from devhub.models import ActivityLog, BlogPost, RssKey, SubmitStep from devhub import perf from editors.helpers import ReviewHelper, get_position from files.models import File, FileUpload, Platform from files.utils import parse_addon from market.models import Refund from paypal.check import Check import paypal from search.views import BaseAjaxSearch from stats.models import Contribution from translations.models import delete_translation from users.models import UserProfile from versions.models import Version from mkt.webapps.models import Webapp from zadmin.models import ValidationResult from . import forms, tasks, feeds, signals log = commonware.log.getLogger('z.devhub') paypal_log = commonware.log.getLogger('z.paypal') # We use a session cookie to make sure people see the dev agreement. DEV_AGREEMENT_COOKIE = 'yes-I-read-the-dev-agreement' class AddonFilter(BaseFilter): opts = (('name', _lazy(u'Name')), ('updated', _lazy(u'Updated')), ('created', _lazy(u'Created')), ('popular', _lazy(u'Downloads')), ('rating', _lazy(u'Rating'))) class AppFilter(BaseFilter): opts = (('name', _lazy(u'Name')), ('created', _lazy(u'Created')), ('downloads', _lazy(u'Weekly Downloads')), ('rating', _lazy(u'Rating'))) def addon_listing(request, default='name', webapp=False, theme=False): """Set up the queryset and filtering for addon listing for Dashboard.""" Filter = AppFilter if webapp else AddonFilter if webapp: qs = Webapp.objects.filter( id__in=request.amo_user.addons.filter(type=amo.ADDON_WEBAPP)) model = Webapp elif theme: qs = request.amo_user.addons.filter(type=amo.ADDON_PERSONA) model = Addon else: qs = request.amo_user.addons.exclude(type__in=[amo.ADDON_WEBAPP, amo.ADDON_PERSONA]) model = Addon filter = Filter(request, qs, 'sort', default, model=model) return filter.qs, filter def index(request): if settings.APP_PREVIEW: # This can be a permanent redirect when we finalize devhub for apps. 
return redirect('devhub.apps') ctx = {'blog_posts': _get_posts()} if request.amo_user: user_addons = request.amo_user.addons.exclude(type=amo.ADDON_WEBAPP) recent_addons = user_addons.order_by('-modified')[:3] ctx['recent_addons'] = [] for addon in recent_addons: ctx['recent_addons'].append({'addon': addon, 'position': get_position(addon)}) return render(request, 'devhub/index.html', ctx) @login_required def dashboard(request, webapp=False, theme=False): addon_items = _get_items( None, request.amo_user.addons.exclude(type=amo.ADDON_WEBAPP))[:4] data = dict(rss=_get_rss_feed(request), blog_posts=_get_posts(), timestamp=int(time.time()), addon_tab=not webapp and not theme, webapp=webapp, theme=theme, addon_items=addon_items) if data['addon_tab']: addons, data['filter'] = addon_listing(request, webapp=webapp) data['addons'] = amo.utils.paginate(request, addons, per_page=10) if theme: themes, data['filter'] = addon_listing(request, theme=True) data['themes'] = amo.utils.paginate(request, themes, per_page=10) if 'filter' in data: data['sorting'] = data['filter'].field data['sort_opts'] = data['filter'].opts return render(request, 'devhub/addons/dashboard.html', data) @dev_required def ajax_compat_status(request, addon_id, addon): if not (addon.accepts_compatible_apps() and addon.current_version): raise http.Http404() return render(request, 'devhub/addons/ajax_compat_status.html', dict(addon=addon)) @dev_required def ajax_compat_error(request, addon_id, addon): if not (addon.accepts_compatible_apps() and addon.current_version): raise http.Http404() return render(request, 'devhub/addons/ajax_compat_error.html', dict(addon=addon)) @dev_required def ajax_compat_update(request, addon_id, addon, version_id): if not addon.accepts_compatible_apps(): raise http.Http404() version = get_object_or_404(Version, pk=version_id, addon=addon) compat_form = forms.CompatFormSet(request.POST or None, queryset=version.apps.all()) if request.method == 'POST' and compat_form.is_valid(): for compat in compat_form.save(commit=False): compat.version = version compat.save() for form in compat_form.forms: if (isinstance(form, forms.CompatForm) and 'max' in form.changed_data): _log_max_version_change(addon, version, form.instance) return render(request, 'devhub/addons/ajax_compat_update.html', dict(addon=addon, version=version, compat_form=compat_form)) def _get_addons(request, addons, addon_id, action): """Create a list of ``MenuItem``s for the activity feed.""" items = [] a = MenuItem() a.selected = (not addon_id) (a.text, a.url) = (_('All My Add-ons'), reverse('devhub.feed_all')) if action: a.url += '?action=' + action items.append(a) for addon in addons: item = MenuItem() try: item.selected = (addon_id and addon.id == int(addon_id)) except ValueError: pass # We won't get here... 
EVER url = reverse('devhub.feed', args=[addon.slug]) if action: url += '?action=' + action item.text, item.url = addon.name, url items.append(item) return items def _get_posts(limit=5): return BlogPost.objects.order_by('-date_posted')[0:limit] def _get_activities(request, action): url = request.get_full_path() choices = (None, 'updates', 'status', 'collections', 'reviews') text = {None: _('All Activity'), 'updates': _('Add-on Updates'), 'status': _('Add-on Status'), 'collections': _('User Collections'), 'reviews': _('User Reviews'), } items = [] for c in choices: i = MenuItem() i.text = text[c] i.url, i.selected = urlparams(url, page=None, action=c), (action == c) items.append(i) return items def _get_items(action, addons): filters = dict(updates=(amo.LOG.ADD_VERSION, amo.LOG.ADD_FILE_TO_VERSION), status=(amo.LOG.USER_DISABLE, amo.LOG.USER_ENABLE, amo.LOG.CHANGE_STATUS, amo.LOG.APPROVE_VERSION,), collections=(amo.LOG.ADD_TO_COLLECTION, amo.LOG.REMOVE_FROM_COLLECTION,), reviews=(amo.LOG.ADD_REVIEW,)) filter = filters.get(action) items = (ActivityLog.objects.for_addons(addons).filter() .exclude(action__in=amo.LOG_HIDE_DEVELOPER)) if filter: items = items.filter(action__in=[i.id for i in filter]) return items def _get_rss_feed(request): key, __ = RssKey.objects.get_or_create(user=request.amo_user) return urlparams(reverse('devhub.feed_all'), privaterss=key.key) def feed(request, addon_id=None): if request.GET.get('privaterss'): return feeds.ActivityFeedRSS()(request) addon_selected = None if not request.user.is_authenticated(): url = reverse('users.login') p = urlquote(request.get_full_path()) return http.HttpResponseRedirect('%s?to=%s' % (url, p)) else: # We exclude apps on AMO. addons_all = request.amo_user.addons.exclude(type=amo.ADDON_WEBAPP) if addon_id: addon = get_object_or_404(Addon.objects.id_or_slug(addon_id)) addons = addon # common query set try: key = RssKey.objects.get(addon=addons) except RssKey.DoesNotExist: key = RssKey.objects.create(addon=addons) addon_selected = addon.id rssurl = urlparams(reverse('devhub.feed', args=[addon_id]), privaterss=key.key) if not acl.check_addon_ownership(request, addons, viewer=True, ignore_disabled=True): raise PermissionDenied else: rssurl = _get_rss_feed(request) addon = None addons = addons_all action = request.GET.get('action') items = _get_items(action, addons) activities = _get_activities(request, action) addon_items = _get_addons(request, addons_all, addon_selected, action) pager = amo.utils.paginate(request, items, 20) data = dict(addons=addon_items, pager=pager, activities=activities, rss=rssurl, addon=addon) return render(request, 'devhub/addons/activity.html', data) @dev_required(webapp=True) def edit(request, addon_id, addon, webapp=False): url_prefix = 'apps' if webapp else 'addons' data = { 'page': 'edit', 'addon': addon, 'webapp': webapp, 'url_prefix': url_prefix, 'valid_slug': addon.slug, 'tags': addon.tags.not_blacklisted().values_list('tag_text', flat=True), 'previews': addon.previews.all(), } if (not webapp and acl.action_allowed(request, 'Addons', 'Configure')): data['admin_form'] = forms.AdminForm(instance=addon) return render(request, 'devhub/addons/edit.html', data) @dev_required(theme=True) def edit_theme(request, addon_id, addon, theme=False): form = addon_forms.EditThemeForm(data=request.POST or None, request=request, instance=addon) owner_form = addon_forms.EditThemeOwnerForm(data=request.POST or None, instance=addon) if request.method == 'POST': if 'owner_submit' in request.POST: if owner_form.is_valid(): 
owner_form.save() messages.success(request, _('Changes successfully saved.')) return redirect('devhub.themes.edit', addon.slug) elif form.is_valid(): form.save() messages.success(request, _('Changes successfully saved.')) return redirect('devhub.themes.edit', addon.reload().slug) else: messages.error(request, _('Please check the form for errors.')) return render(request, 'devhub/personas/edit.html', { 'addon': addon, 'persona': addon.persona, 'form': form, 'owner_form': owner_form}) @dev_required(owner_for_post=True, webapp=True, theme=True) @post_required def delete(request, addon_id, addon, webapp=False, theme=False): # Database deletes only allowed for free or incomplete addons. if not addon.can_be_deleted(): if webapp: msg = loc('App cannot be deleted. Disable this app instead.') else: msg = _('Add-on cannot be deleted. Disable this add-on instead.') messages.error(request, msg) return redirect(addon.get_dev_url('versions')) form = forms.DeleteForm(request) if form.is_valid(): reason = form.cleaned_data.get('reason', '') addon.delete(msg='Removed via devhub', reason=reason) messages.success(request, _('Theme deleted.') if theme else _('Add-on deleted.')) return redirect('devhub.%s' % ('apps' if webapp else 'themes' if theme else 'addons')) else: if theme: messages.error(request, _('Password was incorrect. Theme was not deleted.')) return redirect(addon.get_dev_url()) else: messages.error(request, _('Password was incorrect. Add-on was not deleted.')) return redirect(addon.get_dev_url('versions')) @dev_required @post_required def enable(request, addon_id, addon): addon.update(disabled_by_user=False) amo.log(amo.LOG.USER_ENABLE, addon) return redirect(addon.get_dev_url('versions')) @dev_required(owner_for_post=True) @post_required def cancel(request, addon_id, addon): if addon.status in amo.STATUS_UNDER_REVIEW: if addon.status == amo.STATUS_LITE_AND_NOMINATED: addon.update(status=amo.STATUS_LITE) else: addon.update(status=amo.STATUS_NULL) amo.log(amo.LOG.CHANGE_STATUS, addon.get_status_display(), addon) return redirect(addon.get_dev_url('versions')) @dev_required @post_required def disable(request, addon_id, addon): addon.update(disabled_by_user=True) amo.log(amo.LOG.USER_DISABLE, addon) return redirect(addon.get_dev_url('versions')) @dev_required(owner_for_post=True, webapp=True) def ownership(request, addon_id, addon, webapp=False): fs, ctx = [], {} # Authors. qs = AddonUser.objects.filter(addon=addon).order_by('position') user_form = forms.AuthorFormSet(request.POST or None, queryset=qs) fs.append(user_form) # Versions. license_form = forms.LicenseForm(request.POST or None, addon=addon) if not addon.is_webapp(): ctx.update(license_form.get_context()) if ctx['license_form']: # if addon has a version fs.append(ctx['license_form']) # Policy. policy_form = forms.PolicyForm(request.POST or None, addon=addon) if not addon.is_webapp(): ctx.update(policy_form=policy_form) fs.append(policy_form) if request.method == 'POST' and all([form.is_valid() for form in fs]): # Authors. 
authors = user_form.save(commit=False) for author in authors: action = None if not author.id or author.user_id != author._original_user_id: action = amo.LOG.ADD_USER_WITH_ROLE author.addon = addon elif author.role != author._original_role: action = amo.LOG.CHANGE_USER_WITH_ROLE author.save() if action: amo.log(action, author.user, author.get_role_display(), addon) if (author._original_user_id and author.user_id != author._original_user_id): amo.log(amo.LOG.REMOVE_USER_WITH_ROLE, (UserProfile, author._original_user_id), author.get_role_display(), addon) for author in user_form.deleted_objects: amo.log(amo.LOG.REMOVE_USER_WITH_ROLE, author.user, author.get_role_display(), addon) if license_form in fs: license_form.save() if policy_form in fs: policy_form.save() messages.success(request, _('Changes successfully saved.')) return redirect(addon.get_dev_url('owner')) ctx.update(addon=addon, webapp=webapp, user_form=user_form) return render(request, 'devhub/addons/owner.html', ctx) @dev_required(owner_for_post=True, webapp=True) def payments(request, addon_id, addon, webapp=False): if addon.is_premium(): return _premium(request, addon_id, addon, webapp) return _voluntary(request, addon_id, addon, webapp) def _premium(request, addon_id, addon, webapp=False): premium_form = forms.PremiumForm(request.POST or None, request=request, extra={'addon': addon, 'amo_user': request.amo_user, 'dest': 'payment'}) if request.method == 'POST' and premium_form.is_valid(): premium_form.save() messages.success(request, _('Changes successfully saved.')) return redirect(addon.get_dev_url('payments')) return render(request, 'devhub/payments/premium.html', dict(addon=addon, webapp=webapp, premium=addon.premium, form=premium_form)) def _voluntary(request, addon_id, addon, webapp): charity = None if addon.charity_id == amo.FOUNDATION_ORG else addon.charity charity_form = forms.CharityForm(request.POST or None, instance=charity, prefix='charity') contrib_form = forms.ContribForm(request.POST or None, instance=addon, initial=forms.ContribForm.initial(addon)) profile_form = forms.ProfileForm(request.POST or None, instance=addon, required=True) if request.method == 'POST': if contrib_form.is_valid(): addon = contrib_form.save(commit=False) addon.wants_contributions = True valid = _save_charity(addon, contrib_form, charity_form) if not addon.has_full_profile(): valid &= profile_form.is_valid() if valid: profile_form.save() if valid: addon.save() messages.success(request, _('Changes successfully saved.')) amo.log(amo.LOG.EDIT_CONTRIBUTIONS, addon) return redirect(addon.get_dev_url('payments')) errors = charity_form.errors or contrib_form.errors or profile_form.errors if errors: messages.error(request, _('There were errors in your submission.')) return render(request, 'devhub/payments/payments.html', dict(addon=addon, webapp=webapp, errors=errors, charity_form=charity_form, contrib_form=contrib_form, profile_form=profile_form)) def _save_charity(addon, contrib_form, charity_form): recipient = contrib_form.cleaned_data['recipient'] if recipient == 'dev': addon.charity = None elif recipient == 'moz': addon.charity_id = amo.FOUNDATION_ORG elif recipient == 'org': if charity_form.is_valid(): addon.charity = charity_form.save() else: return False return True @waffle_switch('allow-refund') @dev_required(webapp=True) def issue_refund(request, addon_id, addon, webapp=False): txn_id = request.REQUEST.get('transaction_id') if not txn_id: raise http.Http404 form_enabled = True contribution = get_object_or_404(Contribution, 
transaction_id=txn_id, type=amo.CONTRIB_PURCHASE) if Refund.objects.filter(contribution=contribution).exists(): messages.error(request, _('Refund already processed.')) form_enabled = False elif request.method == 'POST': if 'issue' in request.POST: try: results = paypal.refund(contribution.paykey) except paypal.PaypalError, e: messages.error(request, _('Refund failed. error: %s') % e) contribution.record_failed_refund(e) else: for res in results: if res['refundStatus'] == 'ALREADY_REVERSED_OR_REFUNDED': paypal_log.debug( 'Refund attempt for already-refunded paykey: %s, ' '%s' % (contribution.paykey, res['receiver.email'])) messages.error(request, _('Refund was previously issued; ' 'no action taken.')) return redirect(addon.get_dev_url('refunds')) contribution.mail_approved() refund = contribution.enqueue_refund(amo.REFUND_APPROVED, request.amo_user) paypal_log.info('Refund %r issued for contribution %r' % (refund.pk, contribution.pk)) messages.success(request, _('Refund issued.')) else: contribution.mail_declined() # TODO: Consider requiring a rejection reason for declined refunds. refund = contribution.enqueue_refund(amo.REFUND_DECLINED, request.amo_user) paypal_log.info('Refund %r declined for contribution %r' % (refund.pk, contribution.pk)) messages.success(request, _('Refund declined.')) return redirect(addon.get_dev_url('refunds')) return render(request, 'devhub/payments/issue-refund.html', {'enabled': form_enabled, 'contribution': contribution, 'addon': addon, 'webapp': webapp, 'transaction_id': txn_id}) @waffle_switch('allow-refund') @dev_required(webapp=True) # TODO: Make sure 'Support' staff can access this. def refunds(request, addon_id, addon, webapp=False): ctx = {'addon': addon, 'webapp': webapp} queues = { 'pending': Refund.objects.pending(addon), 'approved': Refund.objects.approved(addon), 'instant': Refund.objects.instant(addon), 'declined': Refund.objects.declined(addon), } # For now set the limit to something stupid so this is stupid easy to QA. 
for status, refunds in queues.iteritems(): ctx[status] = amo.utils.paginate(request, refunds, per_page=5) return render(request, 'devhub/payments/refunds.html', ctx) @dev_required @post_required def disable_payments(request, addon_id, addon): addon.update(wants_contributions=False) return redirect(addon.get_dev_url('payments')) @dev_required(webapp=True) @post_required def remove_profile(request, addon_id, addon, webapp=False): delete_translation(addon, 'the_reason') delete_translation(addon, 'the_future') if addon.wants_contributions: addon.update(wants_contributions=False) return redirect(addon.get_dev_url('profile')) @dev_required(webapp=True) def profile(request, addon_id, addon, webapp=False): profile_form = forms.ProfileForm(request.POST or None, instance=addon) if request.method == 'POST' and profile_form.is_valid(): profile_form.save() amo.log(amo.LOG.EDIT_PROPERTIES, addon) messages.success(request, _('Changes successfully saved.')) return redirect(addon.get_dev_url('profile')) return render(request, 'devhub/addons/profile.html', dict(addon=addon, webapp=webapp, profile_form=profile_form)) @login_required @post_required @json_view def compat_application_versions(request): app_id = request.POST['application_id'] f = CheckCompatibilityForm() return {'choices': f.version_choices_for_app_id(app_id)} @login_required def validate_addon(request): return render(request, 'devhub/validate_addon.html', {'title': _('Validate Add-on'), 'upload_url': reverse('devhub.standalone_upload')}) @login_required def check_addon_compatibility(request): form = CheckCompatibilityForm() return render(request, 'devhub/validate_addon.html', {'appversion_form': form, 'title': _('Check Add-on Compatibility'), 'upload_url': reverse('devhub.standalone_upload')}) @dev_required @json_view def file_perf_tests_start(request, addon_id, addon, file_id): if not waffle.flag_is_active(request, 'perf-tests'): raise PermissionDenied file_ = get_object_or_404(File, pk=file_id) plats = perf.PLATFORM_MAP.get(file_.platform.id, None) if plats is None: log.info('Unsupported performance platform %s for file %s' % (file_.platform.id, file_)) # TODO(Kumar) provide a message about this return {'success': False} for app in perf.ALL_APPS: for plat in plats: tasks.start_perf_test_for_file.delay(file_.id, plat, app) return {'success': True} def packager_path(name): return os.path.join(settings.PACKAGER_PATH, '%s.zip' % name) @anonymous_csrf def package_addon(request): basic_form = forms.PackagerBasicForm(request.POST or None) features_form = forms.PackagerFeaturesForm(request.POST or None) compat_forms = forms.PackagerCompatFormSet(request.POST or None) # Process requests, but also avoid short circuiting by using all(). 
if (request.method == 'POST' and all([basic_form.is_valid(), features_form.is_valid(), compat_forms.is_valid()])): basic_data = basic_form.cleaned_data compat_data = compat_forms.cleaned_data data = {'id': basic_data['id'], 'version': basic_data['version'], 'name': basic_data['name'], 'slug': basic_data['package_name'], 'description': basic_data['description'], 'author_name': basic_data['author_name'], 'contributors': basic_data['contributors'], 'targetapplications': [c for c in compat_data if c['enabled']]} tasks.packager.delay(data, features_form.cleaned_data) return redirect('devhub.package_addon_success', basic_data['package_name']) return render(request, 'devhub/package_addon.html', {'basic_form': basic_form, 'compat_forms': compat_forms, 'features_form': features_form}) def package_addon_success(request, package_name): """Return the success page for the add-on packager.""" return render(request, 'devhub/package_addon_success.html', {'package_name': package_name}) @json_view def package_addon_json(request, package_name): """Return the URL of the packaged add-on.""" path_ = packager_path(package_name) if storage.exists(path_): url = reverse('devhub.package_addon_download', args=[package_name]) return {'download_url': url, 'filename': os.path.basename(path_), 'size': round(storage.open(path_).size / 1024, 1)} def package_addon_download(request, package_name): """Serve a packaged add-on.""" path_ = packager_path(package_name) if not storage.exists(path_): raise http.Http404() return HttpResponseSendFile(request, path_, content_type='application/zip') @login_required @post_required def upload(request, addon_slug=None, is_standalone=False): filedata = request.FILES['upload'] fu = FileUpload.from_post(filedata, filedata.name, filedata.size) log.info('FileUpload created: %s' % fu.pk) if request.user.is_authenticated(): fu.user = request.amo_user fu.save() if request.POST.get('app_id') and request.POST.get('version_id'): app = get_object_or_404(Application, pk=request.POST['app_id']) ver = get_object_or_404(AppVersion, pk=request.POST['version_id']) tasks.compatibility_check.delay(fu.pk, app.guid, ver.version) else: tasks.validator.delay(fu.pk) if addon_slug: return redirect('devhub.upload_detail_for_addon', addon_slug, fu.pk) elif is_standalone: return redirect('devhub.standalone_upload_detail', fu.pk) else: return redirect('devhub.upload_detail', fu.pk, 'json') @login_required @post_required @json_view def upload_manifest(request): form = forms.NewManifestForm(request.POST) if form.is_valid(): upload = FileUpload.objects.create() tasks.fetch_manifest.delay(form.cleaned_data['manifest'], upload.pk) return redirect('devhub.upload_detail', upload.pk, 'json') else: error_text = _('There was an error with the submission.') if 'manifest' in form.errors: error_text = ' '.join(form.errors['manifest']) error_message = {'type': 'error', 'message': error_text, 'tier': 1} v = {'errors': 1, 'success': False, 'messages': [error_message]} return make_validation_result(dict(validation=v, error=error_text)) @login_required @post_required def standalone_upload(request): return upload(request, is_standalone=True) @login_required @json_view def standalone_upload_detail(request, uuid): upload = get_object_or_404(FileUpload, uuid=uuid) url = reverse('devhub.standalone_upload_detail', args=[uuid]) return upload_validation_context(request, upload, url=url) @post_required @dev_required def upload_for_addon(request, addon_id, addon): return upload(request, addon_slug=addon.slug) @dev_required @json_view def 
upload_detail_for_addon(request, addon_id, addon, uuid): upload = get_object_or_404(FileUpload, uuid=uuid) return json_upload_detail(request, upload, addon_slug=addon.slug) def make_validation_result(data, is_compatibility=False): """Safe wrapper around JSON dict containing a validation result. Keyword Arguments **is_compatibility=False** When True, errors will be summarized as if they were in a regular validation result. """ if not settings.EXPOSE_VALIDATOR_TRACEBACKS: if data['error']: # Just expose the message, not the traceback data['error'] = data['error'].strip().split('\n')[-1].strip() if data['validation']: lim = settings.VALIDATOR_MESSAGE_LIMIT if lim: del (data['validation']['messages'] [settings.VALIDATOR_MESSAGE_LIMIT:]) ending_tier = data['validation'].get('ending_tier', 0) for msg in data['validation']['messages']: if msg['tier'] > ending_tier: ending_tier = msg['tier'] if msg['tier'] == 0: # We can't display a message if it's on tier 0. # Should get fixed soon in bug 617481 msg['tier'] = 1 for k, v in msg.items(): msg[k] = escape_all(v) if lim: compatibility_count = 0 if data['validation'].get('compatibility_summary'): cs = data['validation']['compatibility_summary'] compatibility_count = (cs['errors'] + cs['warnings'] + cs['notices']) else: cs = {} leftover_count = (data['validation'].get('errors', 0) + data['validation'].get('warnings', 0) + data['validation'].get('notices', 0) + compatibility_count - lim) if leftover_count > 0: msgtype = 'notice' if is_compatibility: if cs.get('errors'): msgtype = 'error' elif cs.get('warnings'): msgtype = 'warning' else: if data['validation']['errors']: msgtype = 'error' elif data['validation']['warnings']: msgtype = 'warning' data['validation']['messages'].append( {'tier': 1, 'type': msgtype, 'message': (_('Validation generated too many errors/' 'warnings so %s messages were truncated. 
' 'After addressing the visible messages, ' "you'll be able to see the others.") % (leftover_count,)), 'compatibility_type': None }) if is_compatibility: compat = data['validation']['compatibility_summary'] for k in ('errors', 'warnings', 'notices'): data['validation'][k] = compat[k] for msg in data['validation']['messages']: if msg['compatibility_type']: msg['type'] = msg['compatibility_type'] data['validation']['ending_tier'] = ending_tier return data @dev_required(allow_editors=True) def file_validation(request, addon_id, addon, file_id): file = get_object_or_404(File, id=file_id) v = reverse('devhub.json_file_validation', args=[addon.slug, file.id]) return render(request, 'devhub/validation.html', dict(validate_url=v, filename=file.filename, timestamp=file.created, addon=addon)) @dev_required(allow_editors=True) def bulk_compat_result(request, addon_id, addon, result_id): qs = ValidationResult.objects.exclude(completed=None) result = get_object_or_404(qs, pk=result_id) job = result.validation_job revalidate_url = reverse('devhub.json_bulk_compat_result', args=[addon.slug, result.id]) return _compat_result(request, revalidate_url, job.application, job.target_version, for_addon=result.file.version.addon, validated_filename=result.file.filename, validated_ts=result.completed) def _compat_result(request, revalidate_url, target_app, target_version, validated_filename=None, validated_ts=None, for_addon=None): app_trans = dict((g, unicode(a.pretty)) for g, a in amo.APP_GUIDS.items()) ff_versions = (AppVersion.objects.filter(application=amo.FIREFOX.id, version_int__gte=4000000000000) .values_list('application', 'version') .order_by('version_int')) tpl = 'https://developer.mozilla.org/en/Firefox_%s_for_developers' change_links = dict() for app, ver in ff_versions: major = ver.split('.')[0] # 4.0b3 -> 4 change_links['%s %s' % (amo.APP_IDS[app].guid, ver)] = tpl % major return render(request, 'devhub/validation.html', dict(validate_url=revalidate_url, filename=validated_filename, timestamp=validated_ts, target_app=target_app, target_version=target_version, addon=for_addon, result_type='compat', app_trans=app_trans, version_change_links=change_links)) @json_view @csrf_exempt @dev_required(allow_editors=True) def json_file_validation(request, addon_id, addon, file_id): file = get_object_or_404(File, id=file_id) if not file.has_been_validated: if request.method != 'POST': return http.HttpResponseNotAllowed(['POST']) try: v_result = tasks.file_validator(file.id) except Exception, exc: log.error('file_validator(%s): %s' % (file.id, exc)) error = "\n".join(traceback.format_exception(*sys.exc_info())) return make_validation_result({'validation': '', 'error': error}) else: v_result = file.validation validation = json.loads(v_result.validation) return make_validation_result(dict(validation=validation, error=None)) @json_view @csrf_exempt @post_required @dev_required(allow_editors=True) def json_bulk_compat_result(request, addon_id, addon, result_id): qs = ValidationResult.objects.exclude(completed=None) result = get_object_or_404(qs, pk=result_id) if result.task_error: return make_validation_result({'validation': '', 'error': result.task_error}) else: validation = json.loads(result.validation) return make_validation_result(dict(validation=validation, error=None)) @json_view def json_upload_detail(request, upload, addon_slug=None): addon = None if addon_slug: addon = get_object_or_404(Addon, slug=addon_slug) result = upload_validation_context(request, upload, addon=addon) plat_exclude = [] if 
result['validation']: try: pkg = parse_addon(upload, addon=addon) except django_forms.ValidationError, exc: errors_before = result['validation'].get('errors', 0) # FIXME: This doesn't guard against client-side # tinkering. for i, msg in enumerate(exc.messages): # Simulate a validation error so the UI displays # it as such result['validation']['messages'].insert( i, {'type': 'error', 'message': msg, 'tier': 1, 'fatal': True}) result['validation']['errors'] += 1 if not errors_before: return json_view.error(make_validation_result(result)) else: app_ids = set([a.id for a in pkg.get('apps', [])]) supported_platforms = [] for app in (amo.MOBILE, amo.ANDROID): if app.id in app_ids: supported_platforms.extend(amo.MOBILE_PLATFORMS.keys()) app_ids.remove(app.id) if len(app_ids): # Targets any other non-mobile app: supported_platforms.extend(amo.DESKTOP_PLATFORMS.keys()) s = amo.SUPPORTED_PLATFORMS.keys() plat_exclude = set(s) - set(supported_platforms) plat_exclude = [str(p) for p in plat_exclude] result['platforms_to_exclude'] = plat_exclude return result def upload_validation_context(request, upload, addon_slug=None, addon=None, url=None): if addon_slug and not addon: addon = get_object_or_404(Addon, slug=addon_slug) if not settings.VALIDATE_ADDONS: upload.task_error = '' upload.validation = json.dumps({'errors': 0, 'messages': [], 'metadata': {}, 'notices': 0, 'warnings': 0}) upload.save() validation = json.loads(upload.validation) if upload.validation else "" if not url: if addon: url = reverse('devhub.upload_detail_for_addon', args=[addon.slug, upload.uuid]) else: url = reverse('devhub.upload_detail', args=[upload.uuid, 'json']) full_report_url = reverse('devhub.upload_detail', args=[upload.uuid]) return make_validation_result(dict(upload=upload.uuid, validation=validation, error=upload.task_error, url=url, full_report_url=full_report_url), is_compatibility=upload.compat_with_app) @login_required def upload_detail(request, uuid, format='html'): upload = get_object_or_404(FileUpload, uuid=uuid) if format == 'json' or request.is_ajax(): return json_upload_detail(request, upload) validate_url = reverse('devhub.standalone_upload_detail', args=[upload.uuid]) if upload.compat_with_app: return _compat_result(request, validate_url, upload.compat_with_app, upload.compat_with_appver) return render(request, 'devhub/validation.html', dict(validate_url=validate_url, filename=upload.name, timestamp=upload.created)) class AddonDependencySearch(BaseAjaxSearch): # No personas. No webapps. types = [amo.ADDON_ANY, amo.ADDON_EXTENSION, amo.ADDON_THEME, amo.ADDON_DICT, amo.ADDON_SEARCH, amo.ADDON_LPAPP] class AppDependencySearch(BaseAjaxSearch): # Only webapps. 
types = [amo.ADDON_WEBAPP] @dev_required @json_view def ajax_dependencies(request, addon_id, addon): s = AppDependencySearch if addon.is_webapp() else AddonDependencySearch return s(request, excluded_ids=[addon_id]).items @dev_required(webapp=True) def addons_section(request, addon_id, addon, section, editable=False, webapp=False): basic = addon_forms.AppFormBasic if webapp else addon_forms.AddonFormBasic models = {'basic': basic, 'media': addon_forms.AddonFormMedia, 'details': addon_forms.AddonFormDetails, 'support': addon_forms.AddonFormSupport, 'technical': addon_forms.AddonFormTechnical, 'admin': forms.AdminForm} if section not in models: raise http.Http404() tags, previews, restricted_tags = [], [], [] cat_form = dependency_form = None if section == 'basic': tags = addon.tags.not_blacklisted().values_list('tag_text', flat=True) cat_form = addon_forms.CategoryFormSet(request.POST or None, addon=addon, request=request) restricted_tags = addon.tags.filter(restricted=True) elif section == 'media': previews = forms.PreviewFormSet(request.POST or None, prefix='files', queryset=addon.previews.all()) elif section == 'technical': if not webapp: dependency_form = forms.DependencyFormSet(request.POST or None, queryset=addon.addons_dependencies.all(), addon=addon, prefix='dependencies') # Get the slug before the form alters it to the form data. valid_slug = addon.slug if editable: if request.method == 'POST': if section == 'license': form = models[section](request.POST) else: form = models[section](request.POST, request.FILES, instance=addon, request=request) if form.is_valid() and (not previews or previews.is_valid()): addon = form.save(addon) if previews: for preview in previews.forms: preview.save(addon) editable = False if section == 'media': amo.log(amo.LOG.CHANGE_ICON, addon) else: amo.log(amo.LOG.EDIT_PROPERTIES, addon) valid_slug = addon.slug if cat_form: if cat_form.is_valid(): cat_form.save() addon.save() else: editable = True if dependency_form: if dependency_form.is_valid(): dependency_form.save() else: editable = True else: if section == 'license': form = models[section]() else: form = models[section](instance=addon, request=request) else: form = False url_prefix = 'apps' if webapp else 'addons' data = {'addon': addon, 'webapp': webapp, 'url_prefix': url_prefix, 'form': form, 'editable': editable, 'tags': tags, 'restricted_tags': restricted_tags, 'cat_form': cat_form, 'preview_form': previews, 'dependency_form': dependency_form, 'valid_slug': valid_slug} return render(request, 'devhub/addons/edit/%s.html' % section, data) @never_cache @dev_required(theme=True) @json_view def image_status(request, addon_id, addon, theme=False): # Default icon needs no checking. if not addon.icon_type or addon.icon_type.split('/')[0] == 'icon': icons = True # Persona icon is handled differently. 
elif addon.type == amo.ADDON_PERSONA: icons = True else: icons = storage.exists(os.path.join(addon.get_icon_dir(), '%s-32.png' % addon.id)) previews = all(storage.exists(p.thumbnail_path) for p in addon.previews.all()) return {'overall': icons and previews, 'icons': icons, 'previews': previews} @json_view def ajax_upload_image(request, upload_type, addon_id=None): errors = [] upload_hash = '' if 'upload_image' in request.FILES: upload_preview = request.FILES['upload_image'] upload_preview.seek(0) upload_hash = uuid.uuid4().hex loc = os.path.join(settings.TMP_PATH, upload_type, upload_hash) with storage.open(loc, 'wb') as fd: for chunk in upload_preview: fd.write(chunk) is_icon = upload_type == 'icon' is_persona = upload_type.startswith('persona_') check = amo.utils.ImageCheck(upload_preview) if (not check.is_image() or upload_preview.content_type not in amo.IMG_TYPES): if is_icon: errors.append(_('Icons must be either PNG or JPG.')) else: errors.append(_('Images must be either PNG or JPG.')) if check.is_animated(): if is_icon: errors.append(_('Icons cannot be animated.')) else: errors.append(_('Images cannot be animated.')) max_size = None if is_icon: max_size = settings.MAX_ICON_UPLOAD_SIZE if is_persona: max_size = settings.MAX_PERSONA_UPLOAD_SIZE if max_size and upload_preview.size > max_size: if is_icon: errors.append(_('Please use images smaller than %dMB.') % ( max_size / 1024 / 1024 - 1)) if is_persona: errors.append(_('Images cannot be larger than %dKB.') % ( max_size / 1024)) if check.is_image() and is_persona: persona, img_type = upload_type.split('_') # 'header' or 'footer' expected_size = amo.PERSONA_IMAGE_SIZES.get(img_type)[1] with storage.open(loc, 'rb') as fp: actual_size = Image.open(fp).size if actual_size != expected_size: # L10n: {0} is an image width (in pixels), {1} is a height. errors.append(_('Image must be exactly {0} pixels wide ' 'and {1} pixels tall.') .format(expected_size[0], expected_size[1])) else: errors.append(_('There was an error uploading your preview.')) if errors: upload_hash = '' return {'upload_hash': upload_hash, 'errors': errors} @dev_required def upload_image(request, addon_id, addon, upload_type): return ajax_upload_image(request, upload_type) @dev_required def version_edit(request, addon_id, addon, version_id): version = get_object_or_404(Version, pk=version_id, addon=addon) version_form = forms.VersionForm(request.POST or None, instance=version) new_file_form = forms.NewFileForm(request.POST or None, addon=addon, version=version, request=request) file_form = forms.FileFormSet(request.POST or None, prefix='files', queryset=version.files.all()) file_history = _get_file_history(version) data = {'version_form': version_form, 'file_form': file_form} is_admin = acl.action_allowed(request, 'ReviewerAdminTools', 'View') if addon.accepts_compatible_apps(): # We should be in no-caching land but this one stays cached for some # reason. 
qs = version.apps.all().no_cache() compat_form = forms.CompatFormSet(request.POST or None, queryset=qs) data['compat_form'] = compat_form if (request.method == 'POST' and all([form.is_valid() for form in data.values()])): data['version_form'].save() data['file_form'].save() for deleted in data['file_form'].deleted_forms: file = deleted.cleaned_data['id'] amo.log(amo.LOG.DELETE_FILE_FROM_VERSION, file.filename, file.version, addon) if 'compat_form' in data: for compat in data['compat_form'].save(commit=False): compat.version = version compat.save() for form in data['compat_form'].forms: if (isinstance(form, forms.CompatForm) and 'max' in form.changed_data): _log_max_version_change(addon, version, form.instance) messages.success(request, _('Changes successfully saved.')) return redirect('devhub.versions.edit', addon.slug, version_id) data.update(addon=addon, version=version, new_file_form=new_file_form, file_history=file_history, is_admin=is_admin) return render(request, 'devhub/versions/edit.html', data) def _log_max_version_change(addon, version, appversion): details = {'version': version.version, 'target': appversion.version.version, 'application': appversion.application.pk} amo.log(amo.LOG.MAX_APPVERSION_UPDATED, addon, version, details=details) def _get_file_history(version): file_ids = [f.id for f in version.all_files] addon = version.addon file_history = (ActivityLog.objects.for_addons(addon) .filter(action__in=amo.LOG_REVIEW_QUEUE)) files = dict([(fid, []) for fid in file_ids]) for log in file_history: details = log.details current_file_ids = details["files"] if 'files' in details else [] for fid in current_file_ids: if fid in file_ids: files[fid].append(log) return files @dev_required @post_required @transaction.commit_on_success def version_delete(request, addon_id, addon): version_id = request.POST.get('version_id') version = get_object_or_404(Version, pk=version_id, addon=addon) if 'disable_version' in request.POST: messages.success(request, _('Version %s disabled.') % version.version) version.files.update(status=amo.STATUS_DISABLED) else: messages.success(request, _('Version %s deleted.') % version.version) version.delete() return redirect(addon.get_dev_url('versions')) def check_validation_override(request, form, addon, version): if version and form.cleaned_data.get('admin_override_validation'): helper = ReviewHelper(request=request, addon=addon, version=version) helper.set_data( dict(operating_systems='', applications='', comments=_(u'This upload has failed validation, and may ' u'lack complete validation results. 
Please ' u'take due care when reviewing it.'))) helper.actions['super']['method']() @json_view @dev_required @post_required def version_add(request, addon_id, addon): form = forms.NewVersionForm(request.POST, addon=addon, request=request) if form.is_valid(): pl = (list(form.cleaned_data['desktop_platforms']) + list(form.cleaned_data['mobile_platforms'])) v = Version.from_upload(form.cleaned_data['upload'], addon, pl) log.info('Version created: %s for: %s' % (v.pk, form.cleaned_data['upload'])) check_validation_override(request, form, addon, v) if (addon.status == amo.STATUS_NULL and form.cleaned_data['nomination_type']): addon.update(status=form.cleaned_data['nomination_type']) url = reverse('devhub.versions.edit', args=[addon.slug, str(v.id)]) return dict(url=url) else: return json_view.error(form.errors) @json_view @dev_required @post_required def version_add_file(request, addon_id, addon, version_id): version = get_object_or_404(Version, pk=version_id, addon=addon) form = forms.NewFileForm(request.POST, addon=addon, version=version, request=request) if not form.is_valid(): return json_view.error(form.errors) upload = form.cleaned_data['upload'] new_file = File.from_upload(upload, version, form.cleaned_data['platform'], parse_addon(upload, addon)) storage.delete(upload.path) check_validation_override(request, form, addon, new_file.version) file_form = forms.FileFormSet(prefix='files', queryset=version.files.all()) form = [f for f in file_form.forms if f.instance == new_file] return render(request, 'devhub/includes/version_file.html', {'form': form[0], 'addon': addon}) @dev_required(webapp=True) def version_list(request, addon_id, addon, webapp=False): qs = addon.versions.order_by('-created').transform(Version.transformer) versions = amo.utils.paginate(request, qs) new_file_form = forms.NewVersionForm(None, addon=addon, request=request) is_admin = acl.action_allowed(request, 'ReviewerAdminTools', 'View') data = {'addon': addon, 'webapp': webapp, 'versions': versions, 'new_file_form': new_file_form, 'position': get_position(addon), 'timestamp': int(time.time()), 'is_admin': is_admin} return render(request, 'devhub/versions/list.html', data) @dev_required def version_bounce(request, addon_id, addon, version): # Use filter since there could be dupes. vs = (Version.objects.filter(version=version, addon=addon) .order_by('-created')) if vs: return redirect('devhub.versions.edit', addon.slug, vs[0].id) else: raise http.Http404() @json_view @dev_required def version_stats(request, addon_id, addon): qs = Version.objects.filter(addon=addon) reviews = (qs.annotate(reviews=Count('reviews')) .values('id', 'version', 'reviews')) d = dict((v['id'], v) for v in reviews) files = qs.annotate(files=Count('files')).values_list('id', 'files') for id, files in files: d[id]['files'] = files return d Step = collections.namedtuple('Step', 'current max') def submit_step(outer_step): """Wraps the function with a decorator that bounces to the right step.""" def decorator(f): @functools.wraps(f) def wrapper(request, *args, **kw): step = outer_step webapp = kw.get('webapp', False) if webapp and step == 7: # decorator calls this step 7, but it's step 5 for apps step = 5 max_step = 5 if webapp else 7 # We only bounce on pages with an addon id. if 'addon' in kw: addon = kw['addon'] on_step = SubmitStep.objects.filter(addon=addon) if on_step: max_step = on_step[0].step if max_step < step: # The step was too high, so bounce to the saved step. 
return redirect(_step_url(max_step, webapp), addon.slug) elif step != max_step: # We couldn't find a step, so we must be done. return redirect(_step_url(7, webapp), addon.slug) kw['step'] = Step(step, max_step) return f(request, *args, **kw) # Tell @dev_required that this is a function in the submit flow so it # doesn't try to redirect into the submit flow. wrapper.submitting = True return wrapper return decorator def _step_url(step, is_webapp): url_base = 'devhub.submit%s' % ('_apps' if is_webapp else '') if is_webapp and str(step).isdigit() and step > 5: step = 5 return '%s.%s' % (url_base, step) @login_required @submit_step(1) def submit(request, step, webapp=False): if request.method == 'POST': response = redirect(_step_url(2, webapp)) response.set_cookie(DEV_AGREEMENT_COOKIE) return response return render(request, 'devhub/addons/submit/start.html', {'step': step, 'webapp': webapp}) @login_required @submit_step(2) def submit_addon(request, step, webapp=False): if DEV_AGREEMENT_COOKIE not in request.COOKIES: return redirect(_step_url(1, webapp)) NewItem = forms.NewWebappForm if webapp else forms.NewAddonForm form = NewItem(request.POST or None, request=request) if request.method == 'POST': if form.is_valid(): data = form.cleaned_data if webapp: p = [Platform.objects.get(id=amo.PLATFORM_ALL.id)] else: p = (list(data.get('desktop_platforms', [])) + list(data.get('mobile_platforms', []))) addon = Addon.from_upload(data['upload'], p) if webapp: tasks.fetch_icon.delay(addon) AddonUser(addon=addon, user=request.amo_user).save() SubmitStep.objects.create(addon=addon, step=3) check_validation_override(request, form, addon, addon.current_version) return redirect(_step_url(3, webapp), addon.slug) template = 'upload_webapp.html' if webapp else 'upload.html' is_admin = acl.action_allowed(request, 'ReviewerAdminTools', 'View') return render(request, 'devhub/addons/submit/%s' % template, {'step': step, 'webapp': webapp, 'new_addon_form': form, 'is_admin': is_admin}) @dev_required(webapp=True) @submit_step(3) def submit_describe(request, addon_id, addon, step, webapp=False): form_cls = forms.Step3WebappForm if addon.is_webapp() else forms.Step3Form form = form_cls(request.POST or None, instance=addon, request=request) cat_form = addon_forms.CategoryFormSet(request.POST or None, addon=addon, request=request) if request.method == 'POST' and form.is_valid() and cat_form.is_valid(): addon = form.save(addon) cat_form.save() SubmitStep.objects.filter(addon=addon).update(step=4) return redirect(_step_url(4, webapp), addon.slug) return render(request, 'devhub/addons/submit/describe.html', {'form': form, 'cat_form': cat_form, 'addon': addon, 'step': step, 'webapp': addon.is_webapp()}) @dev_required(webapp=True) @submit_step(4) def submit_media(request, addon_id, addon, step, webapp=False): form_icon = addon_forms.AddonFormMedia(request.POST or None, request.FILES or None, instance=addon, request=request) form_previews = forms.PreviewFormSet(request.POST or None, prefix='files', queryset=addon.previews.all()) if (request.method == 'POST' and form_icon.is_valid() and form_previews.is_valid()): addon = form_icon.save(addon) for preview in form_previews.forms: preview.save(addon) SubmitStep.objects.filter(addon=addon).update(step=5) # Special handling for webapps, where this is jumping to the done step if addon.is_webapp(): addon.update(status=amo.WEBAPPS_UNREVIEWED_STATUS) SubmitStep.objects.filter(addon=addon).delete() signals.submission_done.send(sender=addon) return redirect(_step_url(5, webapp), addon.slug) 
return render(request, 'devhub/addons/submit/media.html', {'form': form_icon, 'addon': addon, 'step': step, 'preview_form': form_previews, 'webapp': addon.is_webapp()}) @dev_required(webapp=True) @submit_step(5) def submit_license(request, addon_id, addon, step, webapp=False): fs, ctx = [], {} # Versions. license_form = forms.LicenseForm(request.POST or None, addon=addon) if not addon.is_webapp(): ctx.update(license_form.get_context()) fs.append(ctx['license_form']) # Policy. policy_form = forms.PolicyForm(request.POST or None, addon=addon) fs.append(policy_form) if request.method == 'POST' and all([form.is_valid() for form in fs]): if license_form in fs: license_form.save(log=False) policy_form.save() SubmitStep.objects.filter(addon=addon).update(step=6) return redirect('devhub.submit.6', addon.slug) ctx.update(addon=addon, policy_form=policy_form, step=step, webapp=addon.is_webapp()) return render(request, 'devhub/addons/submit/license.html', ctx) @dev_required @submit_step(6) def submit_select_review(request, addon_id, addon, step): review_type_form = forms.ReviewTypeForm(request.POST or None) updated_status = None if request.method == 'POST' and review_type_form.is_valid(): updated_status = review_type_form.cleaned_data['review_type'] if updated_status: addon.update(status=updated_status) SubmitStep.objects.filter(addon=addon).delete() signals.submission_done.send(sender=addon) return redirect('devhub.submit.7', addon.slug) return render(request, 'devhub/addons/submit/select-review.html', {'addon': addon, 'review_type_form': review_type_form, 'step': step}) @dev_required(webapp=True) @submit_step(7) def submit_done(request, addon_id, addon, step, webapp=False): # Bounce to the versions page if they don't have any versions. if not addon.versions.exists(): return redirect(addon.get_dev_url('versions')) sp = addon.current_version.supported_platforms is_platform_specific = sp != [amo.PLATFORM_ALL] try: author = addon.authors.all()[0] except IndexError: # This should never happen. author = None if author: submitted_addons = (author.addons .exclude(type=amo.ADDON_WEBAPP) .exclude(status=amo.STATUS_NULL).count()) if submitted_addons == 1: # We can use locale-prefixed URLs because the submitter probably # speaks the same language by the time he/she reads the email. 
context = { 'app': unicode(request.APP.pretty), 'detail_url': absolutify(addon.get_url_path()), 'version_url': absolutify(addon.get_dev_url('versions')), 'edit_url': absolutify(addon.get_dev_url('edit')), 'full_review': addon.status == amo.STATUS_NOMINATED } tasks.send_welcome_email.delay(addon.id, [author.email], context) return render(request, 'devhub/addons/submit/done.html', {'addon': addon, 'step': step, 'webapp': addon.is_webapp(), 'is_platform_specific': is_platform_specific}) @dev_required def submit_resume(request, addon_id, addon): step = SubmitStep.objects.filter(addon=addon) return _resume(addon, step) def _resume(addon, step): if step: return redirect(_step_url(step[0].step, addon.is_webapp()), addon.slug) return redirect(addon.get_dev_url('versions')) @login_required @dev_required def submit_bump(request, addon_id, addon, webapp=False): if not acl.action_allowed(request, 'Admin', 'EditSubmitStep'): raise PermissionDenied step = SubmitStep.objects.filter(addon=addon) step = step[0] if step else None if request.method == 'POST' and request.POST.get('step'): new_step = request.POST['step'] if step: step.step = new_step else: step = SubmitStep(addon=addon, step=new_step) step.save() return redirect(_step_url('bump', webapp), addon.slug) return render(request, 'devhub/addons/submit/bump.html', dict(addon=addon, step=step)) @login_required def submit_theme(request): data = {} if request.method == 'POST': data = request.POST.dict() if 'unsaved_data' in request.session and data['unsaved_data'] == '{}': # Restore unsaved data on second invalid POST.. data['unsaved_data'] = request.session['unsaved_data'] form = addon_forms.ThemeForm(data=data or None, files=request.FILES or None, request=request) if request.method == 'POST': if form.is_valid(): addon = form.save() return redirect('devhub.themes.submit.done', addon.slug) else: # Stored unsaved data in request.session since it gets lost on # second invalid POST. messages.error(request, _('Please check the form for errors.')) request.session['unsaved_data'] = data['unsaved_data'] return render(request, 'devhub/personas/submit.html', dict(form=form)) @dev_required(theme=True) def submit_theme_done(request, addon_id, addon, theme): if addon.is_public(): return redirect(addon.get_url_path()) return render(request, 'devhub/personas/submit_done.html', dict(addon=addon)) @dev_required(theme=True) @post_required def remove_locale(request, addon_id, addon, theme): POST = request.POST if 'locale' in POST and POST['locale'] != addon.default_locale: addon.remove_locale(POST['locale']) return http.HttpResponse() return http.HttpResponseBadRequest() # You can only request one of the new review tracks. 
REQUEST_REVIEW = (amo.STATUS_PUBLIC, amo.STATUS_LITE) @dev_required @post_required def request_review(request, addon_id, addon, status): status_req = int(status) if status_req not in addon.can_request_review(): return http.HttpResponseBadRequest() elif status_req == amo.STATUS_PUBLIC: if addon.status == amo.STATUS_LITE: new_status = amo.STATUS_LITE_AND_NOMINATED else: new_status = amo.STATUS_NOMINATED elif status_req == amo.STATUS_LITE: if addon.status in (amo.STATUS_PUBLIC, amo.STATUS_LITE_AND_NOMINATED): new_status = amo.STATUS_LITE else: new_status = amo.STATUS_UNREVIEWED addon.update(status=new_status) msg = {amo.STATUS_LITE: _('Preliminary Review Requested.'), amo.STATUS_PUBLIC: _('Full Review Requested.')} messages.success(request, msg[status_req]) amo.log(amo.LOG.CHANGE_STATUS, addon.get_status_display(), addon) return redirect(addon.get_dev_url('versions')) # TODO(kumar): Remove when the editor tools are in zamboni. def validator_redirect(request, version_id): v = get_object_or_404(Version, id=version_id) return redirect('devhub.addons.versions', v.addon_id, permanent=True) @post_required @addon_view def admin(request, addon): if not acl.action_allowed(request, 'Addons', 'Configure'): raise PermissionDenied form = forms.AdminForm(request, request.POST or None, instance=addon) if form.is_valid(): form.save() return render(request, 'devhub/addons/edit/admin.html', {'addon': addon, 'admin_form': form}) def docs(request, doc_name=None, doc_page=None): filename = '' all_docs = {'getting-started': [], 'reference': [], 'policies': ['submission', 'reviews', 'maintenance', 'recommended', 'agreement', 'contact'], 'case-studies': ['cooliris', 'stumbleupon', 'download-statusbar'], 'how-to': ['getting-started', 'extension-development', 'thunderbird-mobile', 'theme-development', 'other-addons'], 'themes': ['faq']} if doc_name and doc_name in all_docs: filename = '%s.html' % doc_name if doc_page and doc_page in all_docs[doc_name]: filename = '%s-%s.html' % (doc_name, doc_page) if not filename: return redirect('devhub.index') return render(request, 'devhub/docs/%s' % filename) def builder(request): return render(request, 'devhub/builder.html') @json_view @post_required def check_paypal(request): if 'email' not in request.POST: raise http.Http404() check = Check(paypal_id=request.POST['email']) check.all() # TODO(andym): we will want to l10n these messages at some point and # we'll need to change this to give more detail back to the user than # a tooltip at a later date. return {'valid': check.passed, 'message': ' '.join(check.errors)} def search(request): query = request.GET.get('q', '') return render(request, 'devhub/devhub_search.html', {'query': query})
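
# Hedged illustration of the step-bouncing pattern that the `submit_step`
# decorator above implements: remember the furthest submission step a user
# has reached and redirect requests for later steps back to it.  Everything
# below is a self-contained sketch with made-up names (`bounce_to_step`,
# `saved_step`, `bounce`), not the actual zamboni code; the real decorator
# also remaps step numbers for webapps and bounces finished add-ons to the
# final "done" step.
import functools


def bounce_to_step(step, saved_step, bounce):
    """Wrap a view-like callable; bounce if `step` is past `saved_step()`."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            furthest = saved_step()
            if furthest is not None and step > furthest:
                return bounce(furthest)
            return func(*args, **kwargs)
        return wrapper
    return decorator


if __name__ == '__main__':
    progress = {'step': 2}

    @bounce_to_step(3, saved_step=lambda: progress['step'],
                    bounce=lambda s: 'redirect to step %d' % s)
    def step_three():
        return 'rendering step 3'

    print(step_three())    # -> 'redirect to step 2'
    progress['step'] = 3
    print(step_three())    # -> 'rendering step 3'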
from copy import copy import urllib from datetime import datetime, timedelta from django.template.loader import render_to_string from django.utils.translation import ugettext_noop from django.utils.translation import ugettext as _ from custom.bihar.utils import (get_team_members, get_all_owner_ids_from_group, SUPERVISOR_ROLES, FLW_ROLES, groups_for_user) from corehq.apps.fixtures.models import FixtureDataItem from corehq.apps.reports.standard import CustomProjectReport from corehq.apps.reports.generic import GenericTabularReport,\ SummaryTablularReport, summary_context from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn from corehq.apps.reports.dispatcher import CustomProjectReportDispatcher from dimagi.utils.excel import alphanumeric_sort_key from dimagi.utils.html import format_html from corehq.apps.groups.models import Group from dimagi.utils.decorators.memoized import memoized from casexml.apps.case.models import CommCareCase from corehq.apps.adm.reports.supervisor import SupervisorReportsADMSection from custom.bihar.reports.indicators.mixins import IndicatorConfigMixIn def shared_bihar_context(report): return { 'home': MainNavReport.get_url(report.domain, render_as=report.render_next), 'render_as': report.render_next, 'mode': report.mode, } class ConvenientBaseMixIn(object): # this is everything that's shared amongst the Bihar supervision reports # this class is an amalgamation of random behavior and is just # for convenience base_template_mobile = "bihar/base_template_mobile.html" report_template_path = "reports/async/tabular.html" hide_filters = True flush_layout = True mobile_enabled = True fields = [] extra_context_providers = [shared_bihar_context] # for the lazy _headers = [] # override @property def headers(self): headers = self._headers[self.mode] if isinstance(self._headers, dict) else self._headers return DataTablesHeader(*(DataTablesColumn(_(h)) for h in headers)) @property def render_next(self): return None if self.rendered_as == "async" else self.rendered_as @classmethod def show_in_navigation(cls, *args, **kwargs): return False @property @memoized def mode(self): # sup_roles = ('ANM', 'LS') # todo if we care about these man_roles = ('MOIC', 'BHM', 'BCM', 'CDPO') if self.request.couch_user.is_commcare_user(): if self.request.couch_user.user_data.get('role', '').upper() in man_roles: return 'manager' return 'supervisor' @property def is_supervisor(self): return self.mode == 'supervisor' @property def is_manager(self): return self.mode == 'manager' def list_prompt(index, value): # e.g. 1. Reports return u"%s. %s" % (_(str(index+1)), _(value)) class ReportReferenceMixIn(object): # allow a report to reference another report @property def next_report_slug(self): return self.request_params.get("next_report") @property def next_report_class(self): return CustomProjectReportDispatcher().get_report(self.domain, self.next_report_slug) class GroupReferenceMixIn(object): # allow a report to reference a group @property def group_id(self): return self.request_params["group"] @property @memoized def group(self): g = Group.get(self.group_id) assert g.domain == self.domain, "Group %s isn't in domain %s" % (g.get_id, self.domain) return g @memoized def get_team_members(self): """ Get any commcare users that are either "asha" or "aww". 
""" roles = { 'supervisor': FLW_ROLES, 'manager': SUPERVISOR_ROLES, } return get_team_members(self.group, roles=roles[self.mode]) @property @memoized def all_owner_ids(self): return get_all_owner_ids_from_group(self.group) @property @memoized def cases(self): keys = [[self.domain, owner_id, False] for owner_id in self.all_owner_ids] return CommCareCase.view('hqcase/by_owner', keys=keys, include_docs=True, reduce=False) @property @memoized def group_display(self): return { 'supervisor': u'{group} ({awcc})', 'manager': u'{group}', }[self.mode].format(group=self.group.name, awcc=get_awcc(self.group)) @property def rendered_report_title(self): return u"{title} - {group}".format(title=_(self.name), group=self.group_display) def team_member_context(report): """ Gets context for adding a team members listing to a report. """ return { "team_members": report.get_team_members(), } class BiharSummaryReport(ConvenientBaseMixIn, SummaryTablularReport, CustomProjectReport): # this is literally just a way to do a multiple inheritance without having # the same 3 classes extended by a bunch of other classes base_template_mobile = "bihar/bihar_summary.html" report_template_path = "reports/async/summary_tabular.html" extra_context_providers = [shared_bihar_context, summary_context] class BiharNavReport(BiharSummaryReport): # this is a bit of a bastardization of the summary report # but it is quite DRY preserve_url_params = False @property def reports(self): # override raise NotImplementedError("Override this!") @property def _headers(self): return [" "] * len(self.reports) @property def data(self): return [default_nav_link(self, i, report_cls) for i, report_cls in enumerate(self.reports)] class MockEmptyReport(BiharSummaryReport): """ A stub empty report """ _headers = ["Whoops, this report isn't done! 
Sorry this is still a prototype."] data = [""] class SubCenterSelectionReport(ConvenientBaseMixIn, GenericTabularReport, CustomProjectReport, ReportReferenceMixIn): name = ugettext_noop("Select Subcenter") slug = "subcenter" description = ugettext_noop("Subcenter selection report") _headers = { 'supervisor': [ugettext_noop("Team Name"), ugettext_noop("AWCC")], 'manager': [ugettext_noop("Subcentre")], } @memoized def _get_groups(self): groups = groups_for_user(self.request.couch_user, self.domain) return sorted( groups, key=lambda group: alphanumeric_sort_key(group.name) ) @property def rows(self): return [self._row(g, i+1) for i, g in enumerate(self._get_groups())] def _row(self, group, rank): def _link(g, text): params = copy(self.request_params) params["group"] = g.get_id return format_html(u'<a href="{details}">{text}</a>', text=text, details=url_and_params(self.next_report_class.get_url(domain=self.domain, render_as=self.render_next), params)) return [group.name, _link(group, get_awcc(group))] if self.is_supervisor else [_link(group, group.name)] class MainNavReport(BiharSummaryReport, IndicatorConfigMixIn): name = ugettext_noop("Main Menu") slug = "mainnav" description = ugettext_noop("Main navigation") @classmethod def additional_reports(cls): from custom.bihar.reports.due_list import DueListSelectionReport from custom.bihar.reports.indicators.reports import MyPerformanceReport return [WorkerRankSelectionReport, DueListSelectionReport, ToolsNavReport, MyPerformanceReport] @classmethod def show_in_navigation(cls, *args, **kwargs): return True @property def _headers(self): return [" "] * (len(self.indicator_config.indicator_sets) + len(self.additional_reports())) @property def data(self): from custom.bihar.reports.indicators.reports import IndicatorNav def _indicator_nav_link(i, indicator_set): params = copy(self.request_params) params["indicators"] = indicator_set.slug params["next_report"] = IndicatorNav.slug return format_html(u'<a href="{next}">{val}</a>', val=list_prompt(i, indicator_set.name), next=url_and_params( SubCenterSelectionReport.get_url(self.domain, render_as=self.render_next), params )) return [_indicator_nav_link(i, iset) for i, iset in\ enumerate(self.indicator_config.indicator_sets)] + \ [default_nav_link(self, len(self.indicator_config.indicator_sets) + i, r) \ for i, r in enumerate(self.additional_reports())] class WorkerRankSelectionReport(SubCenterSelectionReport): slug = "workerranks" name = ugettext_noop("Worker Rank Table") # The subreport URL is hard coded here until there's an easier # way to get this from configuration WORKER_RANK_SLUG = 'worker_rank_table' def _row(self, group, rank): def _get_url(): # HACK: hard(ish) code get_url until we fix the render_as bug url = SupervisorReportsADMSection.get_url(domain=self.domain, subreport=self.WORKER_RANK_SLUG) # /a/[domain]/reports/adm/[section]/[subreport]/ # needs to become # /a/[domain]/reports/adm/[render_as]/[section]/[subreport]/ if self.render_next: section_chunk = "/{section}/".format(section=SupervisorReportsADMSection.slug) section_with_rendering = "/{render_as}{section_chunk}".format( render_as=self.render_next, section_chunk=section_chunk ) url = url.replace(section_chunk, section_with_rendering) return url url = _get_url() end = datetime.today().date() start = end - timedelta(days=30) params = { "ufilter": 0, "startdate": start.strftime("%Y-%m-%d"), "enddate": end.strftime("%Y-%m-%d") } def _awcc_link(g): params["group"] = g.get_id return format_html(u'<a href="{details}">{awcc}</a>', 
awcc=get_awcc(g), details=url_and_params(url, params)) return [group.name, _awcc_link(group)] class ToolsNavReport(BiharSummaryReport): name = ugettext_noop("Tools Menu") slug = "tools" _headers = [" ", " ", " "] @property def data(self): def _referral_link(i): params = copy(self.request_params) params["next_report"] = ReferralListReport.slug return format_html(u'<a href="{next}">{val}</a>', val=list_prompt(i, _(ReferralListReport.name)), next=url_and_params( SubCenterSelectionReport.get_url(self.domain, render_as=self.render_next), params )) return [_referral_link(0), default_nav_link(self, 1, EDDCalcReport), default_nav_link(self, 2, BMICalcReport),] class ReferralListReport(GroupReferenceMixIn, MockEmptyReport): name = ugettext_noop("Referrals") slug = "referrals" _headers = [] @property def data(self): # this is being called multiple times def render(f): title = { "public": _("Public Facility"), "private": _("Private Facility"), "transport": _("Transport") }[f.fields["type"]] return format_html(u"%s: %s<br /># %s" % (title, f.fields.get("name", ""), f.fields.get("number", ""))) fixtures = FixtureDataItem.by_group(self.group) _data = [] self._headers = [] for f in fixtures: _data.append(render(f)) self._headers.append(" ") if not _data: _data = ['No referrals for %s' % self.group.name] self._headers = [" "] return _data class InputReport(MockEmptyReport): name = "" slug = "" _headers = [" "] _inputs = [] @property def form_html(self): return render_to_string("bihar/partials/input.html", {"inputs": self._inputs}) @property def data(self): for i in self._inputs: if not self.request.GET.get(i["name"], None): return [self.form_html] return self.calc(self.request.GET) def calc(self, input): return [_("This calculation has not yet been implemented.")] class EDDCalcReport(InputReport): name = ugettext_noop("EDD Calculator") slug = "eddcalc" _inputs = [ { "name": "lmp", "type": "text", "label": ugettext_noop("Enter LMP (DD-MM-YYYY)") } ] _headers = [" "] def calc(self, input): try: lmp_date = datetime.strptime(input["lmp"], "%d-%m-%Y") edd_date = lmp_date + timedelta(days=280) return [_("Estimated Date of Delivery: %s") % edd_date.strftime("%d-%m-%Y")] except ValueError: self._headers = [" ", " "] return [_("Error: We can't parse your input, please try again"), self.form_html] class BMICalcReport(InputReport): name = ugettext_noop("BMI Calculator") slug = "bmicalc" _inputs = [ { "name": "weight", "type": "text", "label": ugettext_noop("Enter weight in kilograms:") }, { "name": "height", "type": "text", "label": ugettext_noop("Enter height in meters:") } ] def calc(self, input): try: weight = float(input["weight"]) height = float(input["height"]) except ValueError: self._headers = [" ", " "] return [_("Error: We can't parse your input, please try again"), self.form_html] bmi = weight / (height * height) if bmi >= 30: return [_("You are obese")] elif bmi >= 25: return [_("You are overweight")] elif bmi >= 18.5: return [_("You are normal weight")] else: return [_("You are underweight")] def default_nav_link(nav_report, i, report_cls): url = report_cls.get_url(nav_report.domain, render_as=nav_report.render_next) if getattr(nav_report, 'preserve_url_params', False): url = url_and_params(url, nav_report.request_params) return format_html(u'<a href="{details}">{val}</a>', val=list_prompt(i, report_cls.name), details=url) def get_awcc(group): return group.metadata.get("awc-code") or _('no awcc') def url_and_params(urlbase, params): assert "?" 
not in urlbase
    return "{url}?{params}".format(url=urlbase, params=urllib.urlencode(params))
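# Illustrative sketch (not part of the Bihar reports module): the two
# calculator reports above reduce to small pieces of arithmetic.  The
# standalone helpers below restate that logic so the expected outputs are easy
# to verify by hand; the function names here are illustrative only and are not
# part of the reports API.
from datetime import datetime, timedelta


def estimated_delivery_date(lmp_string):
    """EDD as computed by EDDCalcReport: LMP (DD-MM-YYYY) plus 280 days."""
    lmp = datetime.strptime(lmp_string, "%d-%m-%Y")
    return (lmp + timedelta(days=280)).strftime("%d-%m-%Y")


def bmi_category(weight_kg, height_m):
    """BMI banding as used by BMICalcReport (weight in kg, height in metres)."""
    bmi = weight_kg / (height_m * height_m)
    if bmi >= 30:
        return "obese"
    elif bmi >= 25:
        return "overweight"
    elif bmi >= 18.5:
        return "normal weight"
    return "underweight"


# estimated_delivery_date("01-01-2014") == "08-10-2014"
# bmi_category(70.0, 1.75) == "normal weight"   (BMI ~= 22.9)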
import re import collections from enum import Enum from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION from ydk.errors import YPYError, YPYModelError from ydk.providers._importer import _yang_ns _meta_table = { 'TerminalDevice.Config' : { 'meta_info' : _MetaInfoClass('TerminalDevice.Config', False, [ ], 'openconfig-terminal-device', 'config', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.State' : { 'meta_info' : _MetaInfoClass('TerminalDevice.State', False, [ ], 'openconfig-terminal-device', 'state', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.Config' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.Config', False, [ _MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None, [], [], ''' Text description for the physical client port ''', 'description', 'openconfig-terminal-device', False), _MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of the physical client port ''', 'name', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'config', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.State' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.State', False, [ _MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None, [], [], ''' Text description for the physical client port ''', 'description', 'openconfig-terminal-device', False), _MetaInfoClassMember('ethernet-compliance-code', REFERENCE_IDENTITY_CLASS, 'EthernetPmdTypeIdentity' , 'ydk.models.openconfig.openconfig_transport_types', 'EthernetPmdTypeIdentity', [], [], ''' Ethernet PMD that the transceiver supports. The SFF/QSFP MSAs have registers for this and CFP MSA has similar. ''', 'ethernet_compliance_code', 'openconfig-terminal-device', False), _MetaInfoClassMember('input-power', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' The input optical power of this port in units of 0.01dBm. If the port is an aggregate of multiple physical channels, this attribute is the total power or sum of all channels. ''', 'input_power', 'openconfig-terminal-device', False), _MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of the physical client port ''', 'name', 'openconfig-terminal-device', False), _MetaInfoClassMember('otn-compliance-code', REFERENCE_IDENTITY_CLASS, 'OtnApplicationCodeIdentity' , 'ydk.models.openconfig.openconfig_transport_types', 'OtnApplicationCodeIdentity', [], [], ''' OTN application code supported by the port ''', 'otn_compliance_code', 'openconfig-terminal-device', False), _MetaInfoClassMember('output-power', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' The output optical power of this port in units of 0.01dBm. If the port is an aggregate of multiple physical channels, this attribute is the total power or sum of all channels. 
''', 'output_power', 'openconfig-terminal-device', False), _MetaInfoClassMember('sonet-sdh-compliance-code', REFERENCE_IDENTITY_CLASS, 'SonetApplicationCodeIdentity' , 'ydk.models.openconfig.openconfig_transport_types', 'SonetApplicationCodeIdentity', [], [], ''' SONET/SDH application code supported by the port ''', 'sonet_sdh_compliance_code', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'state', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.Transceiver.Config' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.Transceiver.Config', False, [ _MetaInfoClassMember('enabled', ATTRIBUTE, 'bool' , None, None, [], [], ''' Turns power on / off to the transceiver -- provides a means to power on/off the transceiver (in the case of SFP, SFP+, QSFP,...) or enable high-power mode (in the case of CFP, CFP2, CFP4) and is optionally supported (device can choose to always enable). True = power on / high power, False = powered off ''', 'enabled', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'config', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.Transceiver.State.PresentEnum' : _MetaInfoEnum('PresentEnum', 'ydk.models.openconfig.openconfig_terminal_device', { 'PRESENT':'PRESENT', 'NOT_PRESENT':'NOT_PRESENT', }, 'openconfig-terminal-device', _yang_ns._namespaces['openconfig-terminal-device']), 'TerminalDevice.ClientPorts.Port.Transceiver.State' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.Transceiver.State', False, [ _MetaInfoClassMember('connector-type', REFERENCE_IDENTITY_CLASS, 'FiberConnectorTypeIdentity' , 'ydk.models.openconfig.openconfig_transport_types', 'FiberConnectorTypeIdentity', [], [], ''' Connector type used on this port ''', 'connector_type', 'openconfig-terminal-device', False), _MetaInfoClassMember('date-code', ATTRIBUTE, 'str' , None, None, [], ['\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}(\\.\\d+)?(Z|[\\+\\-]\\d{2}:\\d{2})'], ''' Representation of the transceiver date code, typically stored as YYMMDD. The time portion of the value is undefined and not intended to be read. ''', 'date_code', 'openconfig-terminal-device', False), _MetaInfoClassMember('enabled', ATTRIBUTE, 'bool' , None, None, [], [], ''' Turns power on / off to the transceiver -- provides a means to power on/off the transceiver (in the case of SFP, SFP+, QSFP,...) or enable high-power mode (in the case of CFP, CFP2, CFP4) and is optionally supported (device can choose to always enable). True = power on / high power, False = powered off ''', 'enabled', 'openconfig-terminal-device', False), _MetaInfoClassMember('fault-condition', ATTRIBUTE, 'bool' , None, None, [], [], ''' Indicates if a fault condition exists in the transceiver ''', 'fault_condition', 'openconfig-terminal-device', False), _MetaInfoClassMember('form-factor', REFERENCE_IDENTITY_CLASS, 'TransceiverFormFactorTypeIdentity' , 'ydk.models.openconfig.openconfig_transport_types', 'TransceiverFormFactorTypeIdentity', [], [], ''' Indicates the type of optical transceiver used on this port. If the client port is built into the device and not plugable, then non-pluggable is the corresponding state. If a device port supports multiple form factors (e.g. QSFP28 and QSFP+, then the value of the transceiver installed shall be reported. 
If no transceiver is present, then the value of the highest rate form factor shall be reported (QSFP28, for example). ''', 'form_factor', 'openconfig-terminal-device', False), _MetaInfoClassMember('internal-temp', ATTRIBUTE, 'int' , None, None, [(-40, 125)], [], ''' Internally measured temperature in degrees Celsius. MSA valid range is between -40 and +125C. Accuracy shall be better than +/- 3 degC over the whole temperature range. ''', 'internal_temp', 'openconfig-terminal-device', False), _MetaInfoClassMember('present', REFERENCE_ENUM_CLASS, 'PresentEnum' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.Transceiver.State.PresentEnum', [], [], ''' Indicates whether a transceiver is present in the specified client port. ''', 'present', 'openconfig-terminal-device', False), _MetaInfoClassMember('serial-no', ATTRIBUTE, 'str' , None, None, [(1, 16)], [], ''' Transceiver serial number. 16-octet field that contains ASCII characters, left-aligned and padded on the right with ASCII spaces (20h). If part serial number is undefined, all 16 octets = 0h ''', 'serial_no', 'openconfig-terminal-device', False), _MetaInfoClassMember('vendor', ATTRIBUTE, 'str' , None, None, [(1, 16)], [], ''' Full name of transceiver vendor. 16-octet field that contains ASCII characters, left-aligned and padded on the right with ASCII spaces (20h) ''', 'vendor', 'openconfig-terminal-device', False), _MetaInfoClassMember('vendor-part', ATTRIBUTE, 'str' , None, None, [(1, 16)], [], ''' Transceiver vendor's part number. 16-octet field that contains ASCII characters, left-aligned and padded on the right with ASCII spaces (20h). If part number is undefined, all 16 octets = 0h ''', 'vendor_part', 'openconfig-terminal-device', False), _MetaInfoClassMember('vendor-rev', ATTRIBUTE, 'str' , None, None, [(1, 2)], [], ''' Transceiver vendor's revision number. 
2-octet field that contains ASCII characters, left-aligned and padded on the right with ASCII spaces (20h) ''', 'vendor_rev', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'state', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.Transceiver' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.Transceiver', False, [ _MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.Transceiver.Config', [], [], ''' Configuration data for client port transceivers ''', 'config', 'openconfig-terminal-device', False), _MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.Transceiver.State', [], [], ''' Operational state data for client port transceivers ''', 'state', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'transceiver', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel.Config' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel.Config', False, [ _MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None, [], [], ''' Text description for the client physical channel ''', 'description', 'openconfig-terminal-device', False), _MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None, [(0, 65535)], [], ''' Index of the physical channnel or lane within a physical client port ''', 'index', 'openconfig-terminal-device', False), _MetaInfoClassMember('tx-laser', ATTRIBUTE, 'bool' , None, None, [], [], ''' Enable (true) or disable (false) the transmit label for the channel ''', 'tx_laser', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'config', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel.State' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel.State', False, [ _MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None, [], [], ''' Text description for the client physical channel ''', 'description', 'openconfig-terminal-device', False), _MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None, [(0, 65535)], [], ''' Index of the physical channnel or lane within a physical client port ''', 'index', 'openconfig-terminal-device', False), _MetaInfoClassMember('output-frequency', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' The frequency in MHz of the individual physical channel (e.g. ITU C50 - 195.0THz and would be reported as 195,000,000 MHz in this model). This attribute is not configurable on most client ports. ''', 'output_frequency', 'openconfig-terminal-device', False), _MetaInfoClassMember('output-power', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' The output optical power of this port in units of 0.01dBm. If the port is an aggregate of multiple physical channels, this attribute is the total power or sum of all channels. 
''', 'output_power', 'openconfig-terminal-device', False), _MetaInfoClassMember('tx-laser', ATTRIBUTE, 'bool' , None, None, [], [], ''' Enable (true) or disable (false) the transmit label for the channel ''', 'tx_laser', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'state', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel', False, [ _MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None, [(0, 65535)], [], ''' Reference to the index number of the client channel ''', 'index', 'openconfig-terminal-device', True), _MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel.Config', [], [], ''' Configuration data ''', 'config', 'openconfig-terminal-device', False), _MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel.State', [], [], ''' Operational state data for client channels ''', 'state', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'channel', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.PhysicalChannels' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.PhysicalChannels', False, [ _MetaInfoClassMember('channel', REFERENCE_LIST, 'Channel' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel', [], [], ''' List of client channels, keyed by index within a physical client port. A physical port with a single channel would have a single zero-indexed element ''', 'channel', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'physical-channels', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment.Config' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment.Config', False, [ _MetaInfoClassMember('allocation', ATTRIBUTE, 'Decimal64' , None, None, [('-9223372036854775.808', '9223372036854775.807')], [], ''' Allocation of the client physical port to the assigned logical channel expressed in Gbps. In most cases, the full client physical port rate is assigned to a single logical channel. 
''', 'allocation', 'openconfig-terminal-device', False), _MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None, [], [], ''' Descriptive name for the client port-to-logical channel mapping ''', 'description', 'openconfig-terminal-device', False), _MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Index of the client port assignment ''', 'index', 'openconfig-terminal-device', False), _MetaInfoClassMember('logical-channel', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Reference to the logical channel for this assignment ''', 'logical_channel', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'config', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment.State' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment.State', False, [ _MetaInfoClassMember('allocation', ATTRIBUTE, 'Decimal64' , None, None, [('-9223372036854775.808', '9223372036854775.807')], [], ''' Allocation of the client physical port to the assigned logical channel expressed in Gbps. In most cases, the full client physical port rate is assigned to a single logical channel. ''', 'allocation', 'openconfig-terminal-device', False), _MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None, [], [], ''' Descriptive name for the client port-to-logical channel mapping ''', 'description', 'openconfig-terminal-device', False), _MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Index of the client port assignment ''', 'index', 'openconfig-terminal-device', False), _MetaInfoClassMember('logical-channel', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Reference to the logical channel for this assignment ''', 'logical_channel', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'state', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment', False, [ _MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Reference to the index of this logical client assignment ''', 'index', 'openconfig-terminal-device', True), _MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment.Config', [], [], ''' Configuration data for the logical client assignment ''', 'config', 'openconfig-terminal-device', False), _MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment.State', [], [], ''' Operational state data for the logical client assignment ''', 'state', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'assignment', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port.LogicalChannelAssignments' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port.LogicalChannelAssignments', False, [ _MetaInfoClassMember('assignment', REFERENCE_LIST, 'Assignment' , 'ydk.models.openconfig.openconfig_terminal_device', 
'TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment', [], [], ''' List of assignments to logical clients ''', 'assignment', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'logical-channel-assignments', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts.Port' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts.Port', False, [ _MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None, [], [], ''' Reference to the name of the client port ''', 'name', 'openconfig-terminal-device', True), _MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.Config', [], [], ''' Configuration data for client physical ports ''', 'config', 'openconfig-terminal-device', False), _MetaInfoClassMember('logical-channel-assignments', REFERENCE_CLASS, 'LogicalChannelAssignments' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.LogicalChannelAssignments', [], [], ''' Enclosing container for client port to logical client mappings ''', 'logical_channel_assignments', 'openconfig-terminal-device', False), _MetaInfoClassMember('physical-channels', REFERENCE_CLASS, 'PhysicalChannels' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.PhysicalChannels', [], [], ''' Enclosing container for client channels ''', 'physical_channels', 'openconfig-terminal-device', False), _MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.State', [], [], ''' Operational state data for client physical ports ''', 'state', 'openconfig-terminal-device', False), _MetaInfoClassMember('transceiver', REFERENCE_CLASS, 'Transceiver' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port.Transceiver', [], [], ''' Top-level container for client port transceiver data ''', 'transceiver', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'port', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.ClientPorts' : { 'meta_info' : _MetaInfoClass('TerminalDevice.ClientPorts', False, [ _MetaInfoClassMember('port', REFERENCE_LIST, 'Port' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts.Port', [], [], ''' List of client physical ports on the terminal device ''', 'port', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'client-ports', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LogicalChannels.Channel.Config' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LogicalChannels.Channel.Config', False, [ _MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None, [], [], ''' Description of the client logical channel ''', 'description', 'openconfig-terminal-device', False), _MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Index of the current logical client channel ''', 'index', 'openconfig-terminal-device', False), _MetaInfoClassMember('protocol-type', REFERENCE_IDENTITY_CLASS, 'LogicalElementProtocolTypeIdentity' , 'ydk.models.openconfig.openconfig_transport_types', 'LogicalElementProtocolTypeIdentity', [], [], ''' The type / stage of the logical element determines the configuration and 
operational state parameters (PMs) available for the logical element ''', 'protocol_type', 'openconfig-terminal-device', False), _MetaInfoClassMember('trib-protocol', REFERENCE_IDENTITY_CLASS, 'TributaryProtocolTypeIdentity' , 'ydk.models.openconfig.openconfig_transport_types', 'TributaryProtocolTypeIdentity', [], [], ''' Protocol framing of the tributary signal. If this LogicalChannel is directly connected to a Client-Port or Optical-Channel, this is the protocol of the associated port. If the LogicalChannel is connected to other LogicalChannels, the TributaryProtocol of the LogicalChannels will define a specific mapping/demapping or multiplexing/demultiplexing function. Not all protocols are valid, depending on the value of trib-rate-class. The expectation is that the NMS will validate that a correct combination of rate class and protocol are specfied. Basic combinations are: rate class: 1G protocols: 1GE rate class: 2.5G protocols: OC48, STM16 rate class: 10G protocols: 10GE LAN, 10GE WAN, OC192, STM64, OTU2, OTU2e, OTU1e, ODU2, ODU2e, ODU1e rate class: 40G protocols: 40GE, OC768, STM256, OTU3, ODU3 rate class: 100G protocols: 100GE, 100G MLG, OTU4, OTUCn, ODU4 ''', 'trib_protocol', 'openconfig-terminal-device', False), _MetaInfoClassMember('trib-rate-class', REFERENCE_IDENTITY_CLASS, 'TributaryRateClassTypeIdentity' , 'ydk.models.openconfig.openconfig_transport_types', 'TributaryRateClassTypeIdentity', [], [], ''' Rounded bit rate of the tributary signal. Exact bit rate will be refined by protocol selection. ''', 'trib_rate_class', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'config', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LogicalChannels.Channel.State.Ethernet' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LogicalChannels.Channel.State.Ethernet', False, [ _MetaInfoClassMember('in-8021q-frames', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Number of 802.1q tagged frames received on the interface ''', 'in_8021q_frames', 'openconfig-terminal-device', False), _MetaInfoClassMember('in-crc-errors', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Number of receive error events due to FCS/CRC check failure ''', 'in_crc_errors', 'openconfig-terminal-device', False), _MetaInfoClassMember('in-fragment-frames', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Number of fragment frames received on the interface. ''', 'in_fragment_frames', 'openconfig-terminal-device', False), _MetaInfoClassMember('in-jabber-frames', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Number of jabber frames received on the interface. Jabber frames are typically defined as oversize frames which also have a bad CRC. Implementations may use slightly different definitions of what constitutes a jabber frame. Often indicative of a NIC hardware problem. 
''', 'in_jabber_frames', 'openconfig-terminal-device', False), _MetaInfoClassMember('in-mac-control-frames', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' MAC layer control frames received on the interface ''', 'in_mac_control_frames', 'openconfig-terminal-device', False), _MetaInfoClassMember('in-mac-pause-frames', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' MAC layer PAUSE frames received on the interface ''', 'in_mac_pause_frames', 'openconfig-terminal-device', False), _MetaInfoClassMember('in-oversize-frames', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Number of oversize frames received on the interface ''', 'in_oversize_frames', 'openconfig-terminal-device', False), _MetaInfoClassMember('out-8021q-frames', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Number of 802.1q tagged frames sent on the interface ''', 'out_8021q_frames', 'openconfig-terminal-device', False), _MetaInfoClassMember('out-mac-control-frames', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' MAC layer control frames sent on the interface ''', 'out_mac_control_frames', 'openconfig-terminal-device', False), _MetaInfoClassMember('out-mac-pause-frames', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' MAC layer PAUSE frames sent on the interface ''', 'out_mac_pause_frames', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'ethernet', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LogicalChannels.Channel.State.Otn.PreFecBer' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LogicalChannels.Channel.State.Otn.PreFecBer', False, [ _MetaInfoClassMember('avg', ATTRIBUTE, 'Decimal64' , None, None, [('-922337203685477580.8', '922337203685477580.7')], [], ''' The arithmetic mean value of the statistic over the sampling period. ''', 'avg', 'openconfig-terminal-device', False), _MetaInfoClassMember('max', ATTRIBUTE, 'Decimal64' , None, None, [('-922337203685477580.8', '922337203685477580.7')], [], ''' The maximum value of the statitic over the sampling period ''', 'max', 'openconfig-terminal-device', False), _MetaInfoClassMember('min', ATTRIBUTE, 'Decimal64' , None, None, [('-922337203685477580.8', '922337203685477580.7')], [], ''' The minimum value of the statistic over the sampling period ''', 'min', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'pre-fec-ber', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LogicalChannels.Channel.State.Otn.PostFecBer' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LogicalChannels.Channel.State.Otn.PostFecBer', False, [ _MetaInfoClassMember('avg', ATTRIBUTE, 'Decimal64' , None, None, [('-922337203685477580.8', '922337203685477580.7')], [], ''' The arithmetic mean value of the statistic over the sampling period. 
''', 'avg', 'openconfig-terminal-device', False), _MetaInfoClassMember('max', ATTRIBUTE, 'Decimal64' , None, None, [('-922337203685477580.8', '922337203685477580.7')], [], ''' The maximum value of the statitic over the sampling period ''', 'max', 'openconfig-terminal-device', False), _MetaInfoClassMember('min', ATTRIBUTE, 'Decimal64' , None, None, [('-922337203685477580.8', '922337203685477580.7')], [], ''' The minimum value of the statistic over the sampling period ''', 'min', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'post-fec-ber', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LogicalChannels.Channel.State.Otn' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LogicalChannels.Channel.State.Otn', False, [ _MetaInfoClassMember('post-fec-ber', REFERENCE_CLASS, 'PostFecBer' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels.Channel.State.Otn.PostFecBer', [], [], ''' Bit error rate after forward error correction -- computed value ''', 'post_fec_ber', 'openconfig-terminal-device', False), _MetaInfoClassMember('pre-fec-ber', REFERENCE_CLASS, 'PreFecBer' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels.Channel.State.Otn.PreFecBer', [], [], ''' Bit error rate before forward error correction -- computed value ''', 'pre_fec_ber', 'openconfig-terminal-device', False), _MetaInfoClassMember('rdi-msg', ATTRIBUTE, 'str' , None, None, [], [], ''' Remote defect indication (RDI) message received ''', 'rdi_msg', 'openconfig-terminal-device', False), _MetaInfoClassMember('tti-msg', ATTRIBUTE, 'str' , None, None, [], [], ''' Trail trace identifier (TTI) message received ''', 'tti_msg', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'otn', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LogicalChannels.Channel.State.LinkStateEnum' : _MetaInfoEnum('LinkStateEnum', 'ydk.models.openconfig.openconfig_terminal_device', { 'UP':'UP', 'DOWN':'DOWN', }, 'openconfig-terminal-device', _yang_ns._namespaces['openconfig-terminal-device']), 'TerminalDevice.LogicalChannels.Channel.State' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LogicalChannels.Channel.State', False, [ _MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None, [], [], ''' Description of the client logical channel ''', 'description', 'openconfig-terminal-device', False), _MetaInfoClassMember('ethernet', REFERENCE_CLASS, 'Ethernet' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels.Channel.State.Ethernet', [], [], ''' PMs and counters for Ethernet protocol channels ''', 'ethernet', 'openconfig-terminal-device', False), _MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Index of the current logical client channel ''', 'index', 'openconfig-terminal-device', False), _MetaInfoClassMember('link-state', REFERENCE_ENUM_CLASS, 'LinkStateEnum' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels.Channel.State.LinkStateEnum', [], [], ''' Link-state of the Ethernet protocol on the logical channel, SONET / SDH framed signal, etc. 
''', 'link_state', 'openconfig-terminal-device', False), _MetaInfoClassMember('otn', REFERENCE_CLASS, 'Otn' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels.Channel.State.Otn', [], [], ''' PMs and statistics for OTN protocol channels ''', 'otn', 'openconfig-terminal-device', False), _MetaInfoClassMember('protocol-type', REFERENCE_IDENTITY_CLASS, 'LogicalElementProtocolTypeIdentity' , 'ydk.models.openconfig.openconfig_transport_types', 'LogicalElementProtocolTypeIdentity', [], [], ''' The type / stage of the logical element determines the configuration and operational state parameters (PMs) available for the logical element ''', 'protocol_type', 'openconfig-terminal-device', False), _MetaInfoClassMember('trib-protocol', REFERENCE_IDENTITY_CLASS, 'TributaryProtocolTypeIdentity' , 'ydk.models.openconfig.openconfig_transport_types', 'TributaryProtocolTypeIdentity', [], [], ''' Protocol framing of the tributary signal. If this LogicalChannel is directly connected to a Client-Port or Optical-Channel, this is the protocol of the associated port. If the LogicalChannel is connected to other LogicalChannels, the TributaryProtocol of the LogicalChannels will define a specific mapping/demapping or multiplexing/demultiplexing function. Not all protocols are valid, depending on the value of trib-rate-class. The expectation is that the NMS will validate that a correct combination of rate class and protocol are specfied. Basic combinations are: rate class: 1G protocols: 1GE rate class: 2.5G protocols: OC48, STM16 rate class: 10G protocols: 10GE LAN, 10GE WAN, OC192, STM64, OTU2, OTU2e, OTU1e, ODU2, ODU2e, ODU1e rate class: 40G protocols: 40GE, OC768, STM256, OTU3, ODU3 rate class: 100G protocols: 100GE, 100G MLG, OTU4, OTUCn, ODU4 ''', 'trib_protocol', 'openconfig-terminal-device', False), _MetaInfoClassMember('trib-rate-class', REFERENCE_IDENTITY_CLASS, 'TributaryRateClassTypeIdentity' , 'ydk.models.openconfig.openconfig_transport_types', 'TributaryRateClassTypeIdentity', [], [], ''' Rounded bit rate of the tributary signal. Exact bit rate will be refined by protocol selection. ''', 'trib_rate_class', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'state', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment.Config' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment.Config', False, [ _MetaInfoClassMember('allocation', ATTRIBUTE, 'Decimal64' , None, None, [('-9223372036854775.808', '9223372036854775.807')], [], ''' Allocation of the logical client channel to the tributary or sub-channel, expressed in Gbps ''', 'allocation', 'openconfig-terminal-device', False), _MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None, [], [], ''' Name assigned to the logical client channel ''', 'description', 'openconfig-terminal-device', False), _MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Index of the current logical client channel to tributary mapping ''', 'index', 'openconfig-terminal-device', False), _MetaInfoClassMember('logical-channel', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Reference to another stage of logical channel elements. 
''', 'logical_channel', 'openconfig-terminal-device', False), _MetaInfoClassMember('optical-channel', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Reference to the line-side optical channel that should carry the current logical channel element. Use this reference to exit the logical element stage. ''', 'optical_channel', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'config', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment.State' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment.State', False, [ _MetaInfoClassMember('allocation', ATTRIBUTE, 'Decimal64' , None, None, [('-9223372036854775.808', '9223372036854775.807')], [], ''' Allocation of the logical client channel to the tributary or sub-channel, expressed in Gbps ''', 'allocation', 'openconfig-terminal-device', False), _MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None, [], [], ''' Name assigned to the logical client channel ''', 'description', 'openconfig-terminal-device', False), _MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Index of the current logical client channel to tributary mapping ''', 'index', 'openconfig-terminal-device', False), _MetaInfoClassMember('logical-channel', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Reference to another stage of logical channel elements. ''', 'logical_channel', 'openconfig-terminal-device', False), _MetaInfoClassMember('optical-channel', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Reference to the line-side optical channel that should carry the current logical channel element. Use this reference to exit the logical element stage. 
''', 'optical_channel', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'state', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment', False, [ _MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Reference to the index for the current tributary assignment ''', 'index', 'openconfig-terminal-device', True), _MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment.Config', [], [], ''' Configuration data for tributary assignments ''', 'config', 'openconfig-terminal-device', False), _MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment.State', [], [], ''' Operational state data for tributary assignments ''', 'state', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'assignment', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments', False, [ _MetaInfoClassMember('assignment', REFERENCE_LIST, 'Assignment' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment', [], [], ''' Logical channel elements may be assigned directly to optical channels for line-side transmission, or can be further groomed into additional stages of logical channel elements. The grooming can multiplex (i.e., split the current element into multiple elements in the subsequent stage) or de-multiplex (i.e., combine the current element with other elements into the same element in the subsequent stage) logical elements in each stage. Note that to support the ability to groom the logical elements, the list of logical channel elements should be populated with an entry for the logical elements at each stage, starting with the initial assignment from the respective client physical port. Each logical element assignment consists of a pointer to an element in the next stage, or to an optical channel, along with a bandwidth allocation for the corresponding assignment (e.g., to split or combine signal). 
''', 'assignment', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'logical-channel-assignments', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LogicalChannels.Channel' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LogicalChannels.Channel', False, [ _MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Reference to the index of the logical client channel ''', 'index', 'openconfig-terminal-device', True), _MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels.Channel.Config', [], [], ''' Configuration data for logical client channels ''', 'config', 'openconfig-terminal-device', False), _MetaInfoClassMember('logical-channel-assignments', REFERENCE_CLASS, 'LogicalChannelAssignments' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments', [], [], ''' Enclosing container for tributary assignments ''', 'logical_channel_assignments', 'openconfig-terminal-device', False), _MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels.Channel.State', [], [], ''' Operational state data for logical client channels ''', 'state', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'channel', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LogicalChannels' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LogicalChannels', False, [ _MetaInfoClassMember('channel', REFERENCE_LIST, 'Channel' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels.Channel', [], [], ''' List of logical channels ''', 'channel', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'logical-channels', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.OpticalChannels.OpticalChannel.Config' : { 'meta_info' : _MetaInfoClass('TerminalDevice.OpticalChannels.OpticalChannel.Config', False, [ _MetaInfoClassMember('frequency', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Frequency of the optical channel ''', 'frequency', 'openconfig-terminal-device', False), _MetaInfoClassMember('index', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Index number assigned to the optical channel. The index must be unique on the local system. 
''', 'index', 'openconfig-terminal-device', False), _MetaInfoClassMember('line-port', ATTRIBUTE, 'str' , None, None, [], [], ''' Reference to the line-side physical port that carries this optical channel ''', 'line_port', 'openconfig-terminal-device', False), _MetaInfoClassMember('operational-mode', ATTRIBUTE, 'int' , None, None, [(0, 65535)], [], ''' Vendor-specific mode identifier -- sets the operational mode for the channel ''', 'operational_mode', 'openconfig-terminal-device', False), _MetaInfoClassMember('power', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Power level of the optical channel ''', 'power', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'config', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.OpticalChannels.OpticalChannel.State' : { 'meta_info' : _MetaInfoClass('TerminalDevice.OpticalChannels.OpticalChannel.State', False, [ _MetaInfoClassMember('frequency', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Frequency of the optical channel ''', 'frequency', 'openconfig-terminal-device', False), _MetaInfoClassMember('index', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Index number assigned to the optical channel. The index must be unique on the local system. ''', 'index', 'openconfig-terminal-device', False), _MetaInfoClassMember('line-port', ATTRIBUTE, 'str' , None, None, [], [], ''' Reference to the line-side physical port that carries this optical channel ''', 'line_port', 'openconfig-terminal-device', False), _MetaInfoClassMember('operational-mode', ATTRIBUTE, 'int' , None, None, [(0, 65535)], [], ''' Vendor-specific mode identifier -- sets the operational mode for the channel ''', 'operational_mode', 'openconfig-terminal-device', False), _MetaInfoClassMember('power', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' Power level of the optical channel ''', 'power', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'state', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.OpticalChannels.OpticalChannel' : { 'meta_info' : _MetaInfoClass('TerminalDevice.OpticalChannels.OpticalChannel', False, [ _MetaInfoClassMember('index', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' ''', 'index', 'openconfig-terminal-device', True), _MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.OpticalChannels.OpticalChannel.Config', [], [], ''' Configuration data ''', 'config', 'openconfig-terminal-device', False), _MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.OpticalChannels.OpticalChannel.State', [], [], ''' Operational state data ''', 'state', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'optical-channel', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.OpticalChannels' : { 'meta_info' : _MetaInfoClass('TerminalDevice.OpticalChannels', False, [ _MetaInfoClassMember('optical-channel', REFERENCE_LIST, 'OpticalChannel' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.OpticalChannels.OpticalChannel', [], [], ''' List of ''', 'optical_channel', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 
'optical-channels', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LinePorts.Port.Config' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LinePorts.Port.Config', False, [ _MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of the line port ''', 'name', 'openconfig-terminal-device', False), _MetaInfoClassMember('output-power', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' The output optical power of this port in units of 0.01dBm. If the port is an aggregate of multiple physical channels, this attribute is the total power or sum of all channels. ''', 'output_power', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'config', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LinePorts.Port.State' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LinePorts.Port.State', False, [ _MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of the line port ''', 'name', 'openconfig-terminal-device', False), _MetaInfoClassMember('output-power', ATTRIBUTE, 'long' , None, None, [(0, 18446744073709551615L)], [], ''' The output optical power of this port in units of 0.01dBm. If the port is an aggregate of multiple physical channels, this attribute is the total power or sum of all channels. ''', 'output_power', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'state', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LinePorts.Port' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LinePorts.Port', False, [ _MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None, [], [], ''' Reference to the name of the line port ''', 'name', 'openconfig-terminal-device', True), _MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LinePorts.Port.Config', [], [], ''' Configuration data for each physical line port ''', 'config', 'openconfig-terminal-device', False), _MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LinePorts.Port.State', [], [], ''' Operational state data for each physical line port ''', 'state', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'port', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.LinePorts' : { 'meta_info' : _MetaInfoClass('TerminalDevice.LinePorts', False, [ _MetaInfoClassMember('port', REFERENCE_LIST, 'Port' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LinePorts.Port', [], [], ''' List of line ports ''', 'port', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'line-ports', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.OperationalModes.Config' : { 'meta_info' : _MetaInfoClass('TerminalDevice.OperationalModes.Config', False, [ ], 'openconfig-terminal-device', 'config', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.OperationalModes.State.SupportedModes' : { 'meta_info' : _MetaInfoClass('TerminalDevice.OperationalModes.State.SupportedModes', False, [ _MetaInfoClassMember('mode-id', ATTRIBUTE, 'int' , None, 
None, [(0, 65535)], [], ''' Two-octet encoding of the vendor-defined operational mode ''', 'mode_id', 'openconfig-terminal-device', True), _MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None, [], [], ''' Vendor-supplied textual description of the characteristics of this operational mode to enable operators to select the appropriate mode for the application. ''', 'description', 'openconfig-terminal-device', False), _MetaInfoClassMember('vendor-id', ATTRIBUTE, 'str' , None, None, [], [], ''' Identifier to represent the vendor / supplier of the platform and the associated operational mode information ''', 'vendor_id', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'supported-modes', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.OperationalModes.State' : { 'meta_info' : _MetaInfoClass('TerminalDevice.OperationalModes.State', False, [ _MetaInfoClassMember('supported-modes', REFERENCE_LIST, 'SupportedModes' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.OperationalModes.State.SupportedModes', [], [], ''' List of supported modes and their associated metadata ''', 'supported_modes', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'state', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice.OperationalModes' : { 'meta_info' : _MetaInfoClass('TerminalDevice.OperationalModes', False, [ _MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.OperationalModes.Config', [], [], ''' Configuration data for operational modes. This should generally be empty, i.e., operational mode information is supplied by the platform vendor and is expected to be read-only ''', 'config', 'openconfig-terminal-device', False), _MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.OperationalModes.State', [], [], ''' Operational state data for vendor-specific operational modes ''', 'state', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'operational-modes', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, 'TerminalDevice' : { 'meta_info' : _MetaInfoClass('TerminalDevice', False, [ _MetaInfoClassMember('client-ports', REFERENCE_CLASS, 'ClientPorts' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.ClientPorts', [], [], ''' Enclosing container for the list of client ports ''', 'client_ports', 'openconfig-terminal-device', False), _MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.Config', [], [], ''' Configuration data for global terminal-device ''', 'config', 'openconfig-terminal-device', False), _MetaInfoClassMember('line-ports', REFERENCE_CLASS, 'LinePorts' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LinePorts', [], [], ''' Enclosing container for line ports ''', 'line_ports', 'openconfig-terminal-device', False), _MetaInfoClassMember('logical-channels', REFERENCE_CLASS, 'LogicalChannels' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.LogicalChannels', [], [], ''' Enclosing container the list of logical channels ''', 'logical_channels', 'openconfig-terminal-device', False), _MetaInfoClassMember('operational-modes', 
REFERENCE_CLASS, 'OperationalModes' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.OperationalModes', [], [], ''' Top-level container for vendor-specific operational mode information ''', 'operational_modes', 'openconfig-terminal-device', False), _MetaInfoClassMember('optical-channels', REFERENCE_CLASS, 'OpticalChannels' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.OpticalChannels', [], [], ''' Enclosing container ''', 'optical_channels', 'openconfig-terminal-device', False), _MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_terminal_device', 'TerminalDevice.State', [], [], ''' Operational state data for global terminal device ''', 'state', 'openconfig-terminal-device', False), ], 'openconfig-terminal-device', 'terminal-device', _yang_ns._namespaces['openconfig-terminal-device'], 'ydk.models.openconfig.openconfig_terminal_device' ), }, } _meta_table['TerminalDevice.ClientPorts.Port.Transceiver.Config']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port.Transceiver']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port.Transceiver.State']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port.Transceiver']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel.Config']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel.State']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port.PhysicalChannels.Channel']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port.PhysicalChannels']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment.Config']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment.State']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port.LogicalChannelAssignments.Assignment']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port.LogicalChannelAssignments']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port.Config']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port.State']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port.Transceiver']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port.PhysicalChannels']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port.LogicalChannelAssignments']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts.Port']['meta_info'] _meta_table['TerminalDevice.ClientPorts.Port']['meta_info'].parent =_meta_table['TerminalDevice.ClientPorts']['meta_info'] _meta_table['TerminalDevice.LogicalChannels.Channel.State.Otn.PreFecBer']['meta_info'].parent =_meta_table['TerminalDevice.LogicalChannels.Channel.State.Otn']['meta_info'] _meta_table['TerminalDevice.LogicalChannels.Channel.State.Otn.PostFecBer']['meta_info'].parent 
=_meta_table['TerminalDevice.LogicalChannels.Channel.State.Otn']['meta_info'] _meta_table['TerminalDevice.LogicalChannels.Channel.State.Ethernet']['meta_info'].parent =_meta_table['TerminalDevice.LogicalChannels.Channel.State']['meta_info'] _meta_table['TerminalDevice.LogicalChannels.Channel.State.Otn']['meta_info'].parent =_meta_table['TerminalDevice.LogicalChannels.Channel.State']['meta_info'] _meta_table['TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment.Config']['meta_info'].parent =_meta_table['TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment']['meta_info'] _meta_table['TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment.State']['meta_info'].parent =_meta_table['TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment']['meta_info'] _meta_table['TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments.Assignment']['meta_info'].parent =_meta_table['TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments']['meta_info'] _meta_table['TerminalDevice.LogicalChannels.Channel.Config']['meta_info'].parent =_meta_table['TerminalDevice.LogicalChannels.Channel']['meta_info'] _meta_table['TerminalDevice.LogicalChannels.Channel.State']['meta_info'].parent =_meta_table['TerminalDevice.LogicalChannels.Channel']['meta_info'] _meta_table['TerminalDevice.LogicalChannels.Channel.LogicalChannelAssignments']['meta_info'].parent =_meta_table['TerminalDevice.LogicalChannels.Channel']['meta_info'] _meta_table['TerminalDevice.LogicalChannels.Channel']['meta_info'].parent =_meta_table['TerminalDevice.LogicalChannels']['meta_info'] _meta_table['TerminalDevice.OpticalChannels.OpticalChannel.Config']['meta_info'].parent =_meta_table['TerminalDevice.OpticalChannels.OpticalChannel']['meta_info'] _meta_table['TerminalDevice.OpticalChannels.OpticalChannel.State']['meta_info'].parent =_meta_table['TerminalDevice.OpticalChannels.OpticalChannel']['meta_info'] _meta_table['TerminalDevice.OpticalChannels.OpticalChannel']['meta_info'].parent =_meta_table['TerminalDevice.OpticalChannels']['meta_info'] _meta_table['TerminalDevice.LinePorts.Port.Config']['meta_info'].parent =_meta_table['TerminalDevice.LinePorts.Port']['meta_info'] _meta_table['TerminalDevice.LinePorts.Port.State']['meta_info'].parent =_meta_table['TerminalDevice.LinePorts.Port']['meta_info'] _meta_table['TerminalDevice.LinePorts.Port']['meta_info'].parent =_meta_table['TerminalDevice.LinePorts']['meta_info'] _meta_table['TerminalDevice.OperationalModes.State.SupportedModes']['meta_info'].parent =_meta_table['TerminalDevice.OperationalModes.State']['meta_info'] _meta_table['TerminalDevice.OperationalModes.Config']['meta_info'].parent =_meta_table['TerminalDevice.OperationalModes']['meta_info'] _meta_table['TerminalDevice.OperationalModes.State']['meta_info'].parent =_meta_table['TerminalDevice.OperationalModes']['meta_info'] _meta_table['TerminalDevice.Config']['meta_info'].parent =_meta_table['TerminalDevice']['meta_info'] _meta_table['TerminalDevice.State']['meta_info'].parent =_meta_table['TerminalDevice']['meta_info'] _meta_table['TerminalDevice.ClientPorts']['meta_info'].parent =_meta_table['TerminalDevice']['meta_info'] _meta_table['TerminalDevice.LogicalChannels']['meta_info'].parent =_meta_table['TerminalDevice']['meta_info'] _meta_table['TerminalDevice.OpticalChannels']['meta_info'].parent =_meta_table['TerminalDevice']['meta_info'] _meta_table['TerminalDevice.LinePorts']['meta_info'].parent =_meta_table['TerminalDevice']['meta_info'] 
_meta_table['TerminalDevice.OperationalModes']['meta_info'].parent =_meta_table['TerminalDevice']['meta_info']
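# --- Illustrative sketch (not part of the generated bindings) ---------------
# The assignments above wire each entry's 'meta_info' object to its parent
# entry, so the nesting of the YANG containers can be recovered from
# `_meta_table` alone. The helper below is a hypothetical example of walking
# those parent links; it assumes only what this file defines (the dict keyed
# by class path and the `.parent` attribute set above).
def _ancestry(class_path):
    """Return the chain of class paths from `class_path` up to the root."""
    chain = [class_path]
    parent = getattr(_meta_table[class_path]['meta_info'], 'parent', None)
    while parent is not None:
        # Find the table key whose 'meta_info' object is this parent.
        for key, entry in _meta_table.items():
            if entry['meta_info'] is parent:
                chain.append(key)
                parent = getattr(parent, 'parent', None)
                break
        else:
            break
    return chain

# For example, _ancestry('TerminalDevice.LinePorts.Port.Config') would yield
# ['TerminalDevice.LinePorts.Port.Config', 'TerminalDevice.LinePorts.Port',
#  'TerminalDevice.LinePorts', 'TerminalDevice'], mirroring the container
# hierarchy of the openconfig-terminal-device model.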
#!/usr/bin/python from libestateredis.estate_redis import estate as estater from libestateredis.estate_redis_cluster import estate as estaterc from cppesnode.estate_zmqclient import estate as estatez from pyclient.estate import estate as estatep from libestatelocal.estate import estate as estatelocal from scapy.all import sniff from scapy.layers.inet import TCP, IP import sys import time import thread import random import string es = None # global values for pps calculation last_log_timestamp = 0 last_local_pcount = 0 last_global_pcount = 0 dummy_state_size = 0 def get_ecounter(k): count = es.get(k) if count is None: return 0 if count == "ES_NONE": return 0 # TODO try get_global latest if we have a miss try: res = int(count) except ValueError: res = 0 print ("ERROR monitor.py:" " Cannot convert get_ecounter value to int: %s" % str(count)) return res def set_ecounter(k, v): es.set(k, v) def incr_ecounter(k, incr=1): c = get_ecounter(k) set_ecounter(k, c + incr) def get_ecounter_global_sum(k): c = es.get_global(k, red_sum) try: return int(c) except ValueError: print ("ERROR monitor.py: cannot convert get_ecounter_global_sum" " value to int.") return 0 def pkt_callback_debug(pkt): sys.stdout.flush() return pkt.summary() def pkt_callback(pkt): """ Called for each packet seen on br0. Updates NF state, e.g., packet counters. """ # filter for IPv4/TCP packets if IP not in pkt: return if TCP not in pkt: return # create 5 tuple flow identifier flow_id = "flow_%s" % str( (pkt[IP].src, pkt[TCP].sport, pkt[IP].dst, pkt[TCP].dport, str(pkt[IP].proto)) ).replace(" ", "") # do pattern matching on raw data PATTERN = "a" pattern_count = 0 #data = str(pkt[TCP].payload) #if len(data) > 0: # pattern_count = data.count(PATTERN) # randomly match packets (emulate malformed packet detection) if pkt[TCP].seq % random.randint(10, 20) == 0: pattern_count += 1 # update state values: # general packet count incr_ecounter("pcount") # flow specific packet count incr_ecounter("pcount:%s" % flow_id) # general match count incr_ecounter("matchcount", pattern_count) # flow specific match count incr_ecounter("matchcount:%s" % flow_id, pattern_count) # TODO: add state: flows seen, flows active on instance (local dict) # debugging: #return "PKT: " + str(pkt.show()) #pkt.summary() def random_bytes(size): return ''.join( random.choice( string.letters + string.digits) for _ in range(int(size))) def init_state(): """ Initializes estate values, and lcoal values. """ global last_log_timestamp last_log_timestamp = time.time() # if we should use a big chouck of dummy data for state transferes, # initialize it: if dummy_state_size > 0: dummydata = random_bytes(dummy_state_size) es.set("dummystate", dummydata) def log_global_state(): """ Executed periodically. Requets local and global state and logs (outputs it). """ global last_log_timestamp global last_local_pcount global last_global_pcount # receive local values t_get_local_start = time.time() pcount_local = get_ecounter("pcount") matchcount_local = get_ecounter("matchcount") time_local_request = time.time() - t_get_local_start # receive global values t_get_global_start = time.time() pcount_global = get_ecounter_global_sum("pcount") matchcount_global = get_ecounter_global_sum("matchcount") # if we should use dummy state with given size, ensure to fetch it always! 
if dummy_state_size > 0: dummydata = es.get_global("dummystate", red_latest) #print dummydata time_global_request = time.time() - t_get_global_start # calculate pps timespan = abs(time.time() - last_log_timestamp) last_log_timestamp = time.time() if timespan == 0: raise Exception("We have a zero timespan for PPS calculation") pps_local = (pcount_local - last_local_pcount) / timespan last_local_pcount = pcount_local pps_global = (pcount_global - last_global_pcount) / timespan last_global_pcount = pcount_global # generate log output print("LOG_NETWORK_MONITOR:" "%f;%f;%f;%f;%f;%f;%f;%f;%f;" % (time.time(), pps_local, pps_global, pcount_local, pcount_global, matchcount_local, matchcount_global, time_local_request, time_global_request)) def print_log_header(): # generate log output print("LOG_NETWORK_MONITOR:" "t;" "pps_local;" "pps_global;" "pcount_local;" "pcount_global;" "matchcount_local;" "matchcount_global;" "t_request_local;" "t_request_global;") def log_thread_func(): while True: time.sleep(5) log_global_state() sys.stdout.flush() def red_sum(l): res = sum([float(i) for i in l]) #print "red_sum: %s = %f" % (str(l), res) return res def red_avg(l): if len(l) < 1: return 0 res = sum([float(i) for i in l]) / float(len(l)) #print "red_avg: %s = %f" % (str(l), res) return res def red_latest(l): if len(l) < 1: return "ES_NONE" return l[0] #TODO add real red_latest implementation def main(): global es global dummy_state_size if len(sys.argv) < 3: print "Arguments missing:" print "monitor.py BACKEND INST_ID DUMMY_STATE_SIZE [BACKEND_OPTIONS1...N]" print "e.g.: monitor.py redis 1 10.0.0.1" exit(1) backend = str(sys.argv[1]) instance_id = int(sys.argv[2]) dummy_state_size = float(sys.argv[3]) # in byte! options = sys.argv[4:] print "DUMMY_STATE_SIZE=%d" % dummy_state_size if backend == "redis": es = estater(instance_id, redis_host=options[0]) elif backend == "rediscluster": es = estaterc(instance_id) elif backend == "libestatezmq": es = estatez(instance_id) es.set_connection_properties(port=(8800 + instance_id)) es.start_cppesnode_process( local_api_port=(8800 + instance_id), peerlist=options) elif backend == "libestatepython": es = estatep(0) es.init_libestate(options[0], options[1], options) elif backend == "libestatelocal": es = estatelocal(0) else: print "specified backend not known" if es is None: print "backend not initialized. abort." exit(1) # initialize state init_state() #start logger thread.start_new_thread(log_thread_func, ()) print_log_header() # start monitoring (and block!) sniff(iface="br0", prn=pkt_callback, filter="ip and tcp", store=0) if __name__ == '__main__': main()
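# --- Illustrative sketch (not invoked by monitor.py) -------------------------
# The packets-per-second figures logged by log_global_state() are counter
# deltas divided by the elapsed time between log calls. The class below is a
# hypothetical, self-contained restatement of that calculation; the name
# RateTracker and its methods are not part of the script above.
import time


class RateTracker(object):
    """Track a monotonically increasing counter and report its rate."""

    def __init__(self):
        self._last_ts = time.time()
        self._last_count = 0

    def update(self, count):
        """Return events per second observed since the previous call."""
        now = time.time()
        timespan = now - self._last_ts
        if timespan <= 0:
            return 0.0
        rate = (count - self._last_count) / timespan
        self._last_ts = now
        self._last_count = count
        return rate

# Usage (hypothetical): pps = tracker.update(get_ecounter("pcount")) called
# once per logging interval, analogous to the pps_local computation above.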
# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib from eventlet import greenthread from oslo.config import cfg from oslo.db import exception as os_db_exception from sqlalchemy import exc as sql_exc from sqlalchemy.orm import exc as sa_exc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import dvr_rpc from neutron.api.rpc.handlers import securitygroups_rpc from neutron.api.v2 import attributes from neutron.common import constants as const from neutron.common import exceptions as exc from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import api as db_api from neutron.db import db_base_plugin_v2 from neutron.db import dvr_mac_db from neutron.db import external_net_db from neutron.db import extradhcpopt_db from neutron.db import models_v2 from neutron.db import quota_db # noqa from neutron.db import securitygroups_rpc_base as sg_db_rpc from neutron.extensions import allowedaddresspairs as addr_pair from neutron.extensions import extra_dhcp_opt as edo_ext from neutron.extensions import portbindings from neutron.extensions import providernet as provider from neutron import manager from neutron.openstack.common import excutils from neutron.openstack.common import importutils from neutron.openstack.common import jsonutils from neutron.openstack.common import lockutils from neutron.openstack.common import log from neutron.openstack.common import uuidutils from neutron.plugins.common import constants as service_constants from neutron.plugins.ml2.common import exceptions as ml2_exc from neutron.plugins.ml2 import config # noqa from neutron.plugins.ml2 import db from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2 import driver_context from neutron.plugins.ml2 import managers from neutron.plugins.ml2 import models from neutron.plugins.ml2 import rpc LOG = log.getLogger(__name__) MAX_BIND_TRIES = 10 # REVISIT(rkukura): Move this and other network_type constants to # providernet.py? TYPE_MULTI_SEGMENT = 'multi-segment' TAP_DEVICE_PREFIX = 'tap' TAP_DEVICE_PREFIX_LENGTH = 3 class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, dvr_mac_db.DVRDbMixin, external_net_db.External_net_db_mixin, sg_db_rpc.SecurityGroupServerRpcMixin, agentschedulers_db.DhcpAgentSchedulerDbMixin, addr_pair_db.AllowedAddressPairsMixin, extradhcpopt_db.ExtraDhcpOptMixin): """Implement the Neutron L2 abstractions using modules. Ml2Plugin is a Neutron plugin based on separately extensible sets of network types and mechanisms for connecting to networks of those types. The network types and mechanisms are implemented as drivers loaded via Python entry points. 
Networks can be made up of multiple segments (not yet fully implemented). """ # This attribute specifies whether the plugin supports or not # bulk/pagination/sorting operations. Name mangling is used in # order to ensure it is qualified by class __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True # List of supported extensions _supported_extension_aliases = ["provider", "external-net", "binding", "quotas", "security-group", "agent", "dhcp_agent_scheduler", "multi-provider", "allowed-address-pairs", "extra_dhcp_opt"] @property def supported_extension_aliases(self): if not hasattr(self, '_aliases'): aliases = self._supported_extension_aliases[:] aliases += self.extension_manager.extension_aliases() sg_rpc.disable_security_group_extension_by_config(aliases) self._aliases = aliases return self._aliases def __init__(self): # First load drivers, then initialize DB, then initialize drivers self.type_manager = managers.TypeManager() self.extension_manager = managers.ExtensionManager() self.mechanism_manager = managers.MechanismManager() super(Ml2Plugin, self).__init__() self.type_manager.initialize() self.extension_manager.initialize() self.mechanism_manager.initialize() # bulk support depends on the underlying drivers self.__native_bulk_support = self.mechanism_manager.native_bulk_support self._setup_rpc() # REVISIT(rkukura): Use stevedore for these? self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver ) LOG.info(_("Modular L2 Plugin initialization complete")) def _setup_rpc(self): self.notifier = rpc.AgentNotifierApi(topics.AGENT) self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( dhcp_rpc_agent_api.DhcpAgentNotifyAPI() ) def start_rpc_listeners(self): self.endpoints = [rpc.RpcCallbacks(self.notifier, self.type_manager), securitygroups_rpc.SecurityGroupServerRpcCallback(), dvr_rpc.DVRServerRpcCallback(), dhcp_rpc.DhcpRpcCallback(), agents_db.AgentExtRpcCallback()] self.topic = topics.PLUGIN self.conn = n_rpc.create_connection(new=True) self.conn.create_consumer(self.topic, self.endpoints, fanout=False) return self.conn.consume_in_threads() def _filter_nets_provider(self, context, nets, filters): # TODO(rkukura): Implement filtering. return nets def _process_port_binding(self, mech_context, context, attrs): binding = mech_context._binding port = mech_context.current changes = False host = attrs and attrs.get(portbindings.HOST_ID) if (attributes.is_attr_set(host) and binding.host != host): binding.host = host changes = True # Whenever a DVR serviceable port comes up on a # node, it has to be communicated to the L3 Plugin # and agent for creating the respective namespaces. if (utils.is_dvr_serviced(port['device_owner'])): l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) if (utils.is_extension_supported( l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)): l3plugin.dvr_update_router_addvm(context, port) vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE) if (attributes.is_attr_set(vnic_type) and binding.vnic_type != vnic_type): binding.vnic_type = vnic_type changes = True # treat None as clear of profile. 
profile = None if attrs and portbindings.PROFILE in attrs: profile = attrs.get(portbindings.PROFILE) or {} if profile not in (None, attributes.ATTR_NOT_SPECIFIED, self._get_profile(binding)): binding.profile = jsonutils.dumps(profile) if len(binding.profile) > models.BINDING_PROFILE_LEN: msg = _("binding:profile value too large") raise exc.InvalidInput(error_message=msg) changes = True # Unbind the port if needed. if changes: binding.vif_type = portbindings.VIF_TYPE_UNBOUND binding.vif_details = '' binding.driver = None binding.segment = None if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: binding.vif_type = portbindings.VIF_TYPE_DISTRIBUTED binding.vif_details = '' binding.driver = None binding.segment = None binding.host = '' self._update_port_dict_binding(port, binding) return changes def _bind_port_if_needed(self, context, allow_notify=False, need_notify=False): plugin_context = context._plugin_context port_id = context._port['id'] # Since the mechanism driver bind_port() calls must be made # outside a DB transaction locking the port state, it is # possible (but unlikely) that the port's state could change # concurrently while these calls are being made. If another # thread or process succeeds in binding the port before this # thread commits its results, the already committed results are # used. If attributes such as binding:host_id, # binding:profile, or binding:vnic_type are updated # concurrently, this loop retries binding using the new # values. count = 0 while True: # First, determine whether it is necessary and possible to # bind the port. binding = context._binding if (binding.vif_type != portbindings.VIF_TYPE_UNBOUND or not binding.host): # We either don't need to bind the port, or can't, so # notify if needed and return. if allow_notify and need_notify: self._notify_port_updated(context) return context # Limit binding attempts to avoid any possibility of # infinite looping and to ensure an error is logged # instead. This does not need to be tunable because no # more than a couple attempts should ever be required in # normal operation. Log at info level if not 1st attempt. count += 1 if count > MAX_BIND_TRIES: LOG.error(_("Failed to commit binding results for %(port)s " "after %(max)s tries"), {'port': port_id, 'max': MAX_BIND_TRIES}) return context if count > 1: greenthread.sleep(0) # yield LOG.info(_("Attempt %(count)s to bind port %(port)s"), {'count': count, 'port': port_id}) # The port isn't already bound and the necessary # information is available, so attempt to bind the port. bind_context = self._bind_port(context) # Now try to commit result of attempting to bind the port. new_context, did_commit = self._commit_port_binding( plugin_context, port_id, binding, bind_context) if not new_context: # The port has been deleted concurrently, so just # return the unbound result from the initial # transaction that completed before the deletion. LOG.debug("Port %s has been deleted concurrently", port_id) return context # Need to notify if we succeed and our results were # committed. if did_commit and (new_context._binding.vif_type != portbindings.VIF_TYPE_BINDING_FAILED): need_notify = True context = new_context def _bind_port(self, orig_context): # Construct a new PortContext from the one from the previous # transaction. 
port = orig_context._port orig_binding = orig_context._binding new_binding = models.PortBinding( host=orig_binding.host, vnic_type=orig_binding.vnic_type, profile=orig_binding.profile, vif_type=portbindings.VIF_TYPE_UNBOUND, vif_details='' ) self._update_port_dict_binding(port, new_binding) new_context = driver_context.PortContext( self, orig_context._plugin_context, port, orig_context._network_context._network, new_binding) # Attempt to bind the port and return the context with the # result. self.mechanism_manager.bind_port(new_context) return new_context def _commit_port_binding(self, plugin_context, port_id, orig_binding, new_context): session = plugin_context.session new_binding = new_context._binding # After we've attempted to bind the port, we begin a # transaction, get the current port state, and decide whether # to commit the binding results. # # REVISIT: Serialize this operation with a semaphore to # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock wait # timeout' errors. with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): # Get the current port state and build a new PortContext # reflecting this state as original state for subsequent # mechanism driver update_port_*commit() calls. port_db, cur_binding = db.get_locked_port_and_binding(session, port_id) if not port_db: # The port has been deleted concurrently. return (None, None) oport = self._make_port_dict(port_db) port = self._make_port_dict(port_db) network = self.get_network(plugin_context, port['network_id']) cur_context = driver_context.PortContext( self, plugin_context, port, network, cur_binding, original_port=oport) # Commit our binding results only if port has not been # successfully bound concurrently by another thread or # process and no binding inputs have been changed. commit = ((cur_binding.vif_type in [portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED]) and orig_binding.host == cur_binding.host and orig_binding.vnic_type == cur_binding.vnic_type and orig_binding.profile == cur_binding.profile) if commit: # Update the port's binding state with our binding # results. cur_binding.vif_type = new_binding.vif_type cur_binding.vif_details = new_binding.vif_details cur_binding.driver = new_binding.driver cur_binding.segment = new_binding.segment # REVISIT(rkukura): The binding:profile attribute is # supposed to be input-only, but the Mellanox driver # currently modifies it while binding. Remove this # code when the Mellanox driver has been updated to # use binding:vif_details instead. if cur_binding.profile != new_binding.profile: cur_binding.profile = new_binding.profile # Update PortContext's port dictionary to reflect the # updated binding state. self._update_port_dict_binding(port, cur_binding) # Update the port status if requested by the bound driver. if new_binding.segment and new_context._new_port_status: port_db.status = new_context._new_port_status port['status'] = new_context._new_port_status # Call the mechanism driver precommit methods, commit # the results, and call the postcommit methods. self.mechanism_manager.update_port_precommit(cur_context) if commit: self.mechanism_manager.update_port_postcommit(cur_context) # Continue, using the port state as of the transaction that # just finished, whether that transaction committed new # results or discovered concurrent port state changes. 
return (cur_context, commit) def _update_port_dict_binding(self, port, binding): port[portbindings.HOST_ID] = binding.host port[portbindings.VNIC_TYPE] = binding.vnic_type port[portbindings.PROFILE] = self._get_profile(binding) port[portbindings.VIF_TYPE] = binding.vif_type port[portbindings.VIF_DETAILS] = self._get_vif_details(binding) def _get_vif_details(self, binding): if binding.vif_details: try: return jsonutils.loads(binding.vif_details) except Exception: LOG.error(_("Serialized vif_details DB value '%(value)s' " "for port %(port)s is invalid"), {'value': binding.vif_details, 'port': binding.port_id}) return {} def _get_profile(self, binding): if binding.profile: try: return jsonutils.loads(binding.profile) except Exception: LOG.error(_("Serialized profile DB value '%(value)s' for " "port %(port)s is invalid"), {'value': binding.profile, 'port': binding.port_id}) return {} def _ml2_extend_port_dict_binding(self, port_res, port_db): # None when called during unit tests for other plugins. if port_db.port_binding: self._update_port_dict_binding(port_res, port_db.port_binding) db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.PORTS, ['_ml2_extend_port_dict_binding']) # Register extend dict methods for network and port resources. # Each mechanism driver that supports extend attribute for the resources # can add those attribute to the result. db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.NETWORKS, ['_ml2_md_extend_network_dict']) db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.PORTS, ['_ml2_md_extend_port_dict']) db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.SUBNETS, ['_ml2_md_extend_subnet_dict']) def _ml2_md_extend_network_dict(self, result, netdb): session = db_api.get_session() with session.begin(subtransactions=True): self.extension_manager.extend_network_dict(session, result) def _ml2_md_extend_port_dict(self, result, portdb): session = db_api.get_session() with session.begin(subtransactions=True): self.extension_manager.extend_port_dict(session, result) def _ml2_md_extend_subnet_dict(self, result, subnetdb): session = db_api.get_session() with session.begin(subtransactions=True): self.extension_manager.extend_subnet_dict(session, result) # Note - The following hook methods have "ml2" in their names so # that they are not called twice during unit tests due to global # registration of hooks in portbindings_db.py used by other # plugins. 
def _ml2_port_model_hook(self, context, original_model, query): query = query.outerjoin(models.PortBinding, (original_model.id == models.PortBinding.port_id)) return query def _ml2_port_result_filter_hook(self, query, filters): values = filters and filters.get(portbindings.HOST_ID, []) if not values: return query return query.filter(models.PortBinding.host.in_(values)) db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( models_v2.Port, "ml2_port_bindings", '_ml2_port_model_hook', None, '_ml2_port_result_filter_hook') def _notify_port_updated(self, mech_context): port = mech_context._port segment = mech_context.bound_segment if not segment: # REVISIT(rkukura): This should notify agent to unplug port network = mech_context.network.current LOG.warning(_("In _notify_port_updated(), no bound segment for " "port %(port_id)s on network %(network_id)s"), {'port_id': port['id'], 'network_id': network['id']}) return self.notifier.port_update(mech_context._plugin_context, port, segment[api.NETWORK_TYPE], segment[api.SEGMENTATION_ID], segment[api.PHYSICAL_NETWORK]) # TODO(apech): Need to override bulk operations def create_network(self, context, network): net_data = network['network'] tenant_id = self._get_tenant_id_for_create(context, net_data) session = context.session with session.begin(subtransactions=True): self._ensure_default_security_group(context, tenant_id) result = super(Ml2Plugin, self).create_network(context, network) self.extension_manager.process_create_network(session, net_data, result) self._process_l3_create(context, result, net_data) net_data['id'] = result['id'] self.type_manager.create_network_segments(context, net_data, tenant_id) self.type_manager._extend_network_dict_provider(context, result) mech_context = driver_context.NetworkContext(self, context, result) self.mechanism_manager.create_network_precommit(mech_context) try: self.mechanism_manager.create_network_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error(_("mechanism_manager.create_network_postcommit " "failed, deleting network '%s'"), result['id']) self.delete_network(context, result['id']) return result def update_network(self, context, id, network): provider._raise_if_updates_provider_attributes(network['network']) session = context.session with session.begin(subtransactions=True): original_network = super(Ml2Plugin, self).get_network(context, id) updated_network = super(Ml2Plugin, self).update_network(context, id, network) self.extension_manager.process_update_network(session, network, original_network) self._process_l3_update(context, updated_network, network['network']) self.type_manager._extend_network_dict_provider(context, updated_network) mech_context = driver_context.NetworkContext( self, context, updated_network, original_network=original_network) self.mechanism_manager.update_network_precommit(mech_context) # TODO(apech) - handle errors raised by update_network, potentially # by re-calling update_network with the previous attributes. For # now the error is propogated to the caller, which is expected to # either undo/retry the operation or delete the resource. 
self.mechanism_manager.update_network_postcommit(mech_context) return updated_network def get_network(self, context, id, fields=None): session = context.session with session.begin(subtransactions=True): result = super(Ml2Plugin, self).get_network(context, id, None) self.type_manager._extend_network_dict_provider(context, result) return self._fields(result, fields) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): session = context.session with session.begin(subtransactions=True): nets = super(Ml2Plugin, self).get_networks(context, filters, None, sorts, limit, marker, page_reverse) for net in nets: self.type_manager._extend_network_dict_provider(context, net) nets = self._filter_nets_provider(context, nets, filters) nets = self._filter_nets_l3(context, nets, filters) return [self._fields(net, fields) for net in nets] def delete_network(self, context, id): # REVISIT(rkukura) The super(Ml2Plugin, self).delete_network() # function is not used because it auto-deletes ports and # subnets from the DB without invoking the derived class's # delete_port() or delete_subnet(), preventing mechanism # drivers from being called. This approach should be revisited # when the API layer is reworked during icehouse. LOG.debug(_("Deleting network %s"), id) session = context.session while True: try: # REVISIT(rkukura): Its not clear that # with_lockmode('update') is really needed in this # transaction, and if not, the semaphore can also be # removed. # # REVISIT: Serialize this operation with a semaphore # to prevent deadlock waiting to acquire a DB lock # held by another thread in the same process, leading # to 'lock wait timeout' errors. with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): self._process_l3_delete(context, id) # Get ports to auto-delete. ports = (session.query(models_v2.Port). enable_eagerloads(False). filter_by(network_id=id). with_lockmode('update').all()) LOG.debug(_("Ports to auto-delete: %s"), ports) only_auto_del = all(p.device_owner in db_base_plugin_v2. AUTO_DELETE_PORT_OWNERS for p in ports) if not only_auto_del: LOG.debug(_("Tenant-owned ports exist")) raise exc.NetworkInUse(net_id=id) # Get subnets to auto-delete. subnets = (session.query(models_v2.Subnet). enable_eagerloads(False). filter_by(network_id=id). with_lockmode('update').all()) LOG.debug(_("Subnets to auto-delete: %s"), subnets) if not (ports or subnets): network = self.get_network(context, id) mech_context = driver_context.NetworkContext(self, context, network) self.mechanism_manager.delete_network_precommit( mech_context) self.type_manager.release_network_segments(session, id) record = self._get_network(context, id) LOG.debug(_("Deleting network record %s"), record) session.delete(record) # The segment records are deleted via cascade from the # network record, so explicit removal is not necessary. 
LOG.debug(_("Committing transaction")) break except os_db_exception.DBError as e: with excutils.save_and_reraise_exception() as ctxt: if isinstance(e.inner_exception, sql_exc.IntegrityError): ctxt.reraise = False msg = _("A concurrent port creation has occurred") LOG.warning(msg) continue for port in ports: try: self.delete_port(context, port.id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_("Exception auto-deleting port %s"), port.id) for subnet in subnets: try: self.delete_subnet(context, subnet.id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_("Exception auto-deleting subnet %s"), subnet.id) try: self.mechanism_manager.delete_network_postcommit(mech_context) except ml2_exc.MechanismDriverError: # TODO(apech) - One or more mechanism driver failed to # delete the network. Ideally we'd notify the caller of # the fact that an error occurred. LOG.error(_("mechanism_manager.delete_network_postcommit failed")) self.notifier.network_delete(context, id) def create_subnet(self, context, subnet): session = context.session with session.begin(subtransactions=True): result = super(Ml2Plugin, self).create_subnet(context, subnet) self.extension_manager.process_create_subnet(session, subnet, result) mech_context = driver_context.SubnetContext(self, context, result) self.mechanism_manager.create_subnet_precommit(mech_context) try: self.mechanism_manager.create_subnet_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error(_("mechanism_manager.create_subnet_postcommit " "failed, deleting subnet '%s'"), result['id']) self.delete_subnet(context, result['id']) return result def update_subnet(self, context, id, subnet): session = context.session with session.begin(subtransactions=True): original_subnet = super(Ml2Plugin, self).get_subnet(context, id) updated_subnet = super(Ml2Plugin, self).update_subnet( context, id, subnet) self.extension_manager.process_update_subnet(session, subnet, original_subnet) mech_context = driver_context.SubnetContext( self, context, updated_subnet, original_subnet=original_subnet) self.mechanism_manager.update_subnet_precommit(mech_context) # TODO(apech) - handle errors raised by update_subnet, potentially # by re-calling update_subnet with the previous attributes. For # now the error is propogated to the caller, which is expected to # either undo/retry the operation or delete the resource. self.mechanism_manager.update_subnet_postcommit(mech_context) return updated_subnet def delete_subnet(self, context, id): # REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet() # function is not used because it deallocates the subnet's addresses # from ports in the DB without invoking the derived class's # update_port(), preventing mechanism drivers from being called. # This approach should be revisited when the API layer is reworked # during icehouse. LOG.debug(_("Deleting subnet %s"), id) session = context.session while True: # REVISIT: Serialize this operation with a semaphore to # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock # wait timeout' errors. with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): subnet = self.get_subnet(context, id) # Get ports to auto-deallocate allocated = (session.query(models_v2.IPAllocation). filter_by(subnet_id=id). join(models_v2.Port). filter_by(network_id=subnet['network_id']). 
with_lockmode('update').all()) LOG.debug(_("Ports to auto-deallocate: %s"), allocated) only_auto_del = all(not a.port_id or a.ports.device_owner in db_base_plugin_v2. AUTO_DELETE_PORT_OWNERS for a in allocated) if not only_auto_del: LOG.debug(_("Tenant-owned ports exist")) raise exc.SubnetInUse(subnet_id=id) if not allocated: mech_context = driver_context.SubnetContext(self, context, subnet) self.mechanism_manager.delete_subnet_precommit( mech_context) LOG.debug(_("Deleting subnet record")) record = self._get_subnet(context, id) session.delete(record) LOG.debug(_("Committing transaction")) break for a in allocated: if a.port_id: # calling update_port() for each allocation to remove the # IP from the port and call the MechanismDrivers data = {'port': {'fixed_ips': [{'subnet_id': ip.subnet_id, 'ip_address': ip.ip_address} for ip in a.ports.fixed_ips if ip.subnet_id != id]}} try: self.update_port(context, a.port_id, data) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_("Exception deleting fixed_ip from " "port %s"), a.port_id) session.delete(a) try: self.mechanism_manager.delete_subnet_postcommit(mech_context) except ml2_exc.MechanismDriverError: # TODO(apech) - One or more mechanism driver failed to # delete the subnet. Ideally we'd notify the caller of # the fact that an error occurred. LOG.error(_("mechanism_manager.delete_subnet_postcommit failed")) def create_port(self, context, port): attrs = port['port'] attrs['status'] = const.PORT_STATUS_DOWN session = context.session with session.begin(subtransactions=True): self._ensure_default_security_group_on_port(context, port) sgids = self._get_security_groups_on_port(context, port) dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) result = super(Ml2Plugin, self).create_port(context, port) self.extension_manager.process_create_port(session, attrs, result) self._process_port_create_security_group(context, result, sgids) network = self.get_network(context, result['network_id']) binding = db.add_port_binding(session, result['id']) mech_context = driver_context.PortContext(self, context, result, network, binding) self._process_port_binding(mech_context, context, attrs) result[addr_pair.ADDRESS_PAIRS] = ( self._process_create_allowed_address_pairs( context, result, attrs.get(addr_pair.ADDRESS_PAIRS))) self._process_port_create_extra_dhcp_opts(context, result, dhcp_opts) self.mechanism_manager.create_port_precommit(mech_context) try: self.mechanism_manager.create_port_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error(_("mechanism_manager.create_port_postcommit " "failed, deleting port '%s'"), result['id']) self.delete_port(context, result['id']) # REVISIT(rkukura): Is there any point in calling this before # a binding has been successfully established? self.notify_security_groups_member_updated(context, result) try: bound_context = self._bind_port_if_needed(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error(_("_bind_port_if_needed " "failed, deleting port '%s'"), result['id']) self.delete_port(context, result['id']) return bound_context._port def update_port(self, context, id, port): attrs = port['port'] need_port_update_notify = False session = context.session # REVISIT: Serialize this operation with a semaphore to # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock wait # timeout' errors. 
with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): port_db, binding = db.get_locked_port_and_binding(session, id) if not port_db: raise exc.PortNotFound(port_id=id) original_port = self._make_port_dict(port_db) updated_port = super(Ml2Plugin, self).update_port(context, id, port) self.extension_manager.process_update_port(session, attrs, original_port) if addr_pair.ADDRESS_PAIRS in port['port']: need_port_update_notify |= ( self.update_address_pairs_on_port(context, id, port, original_port, updated_port)) need_port_update_notify |= self.update_security_group_on_port( context, id, port, original_port, updated_port) network = self.get_network(context, original_port['network_id']) need_port_update_notify |= self._update_extra_dhcp_opts_on_port( context, id, port, updated_port) mech_context = driver_context.PortContext( self, context, updated_port, network, binding, original_port=original_port) need_port_update_notify |= self._process_port_binding( mech_context, context, attrs) self.mechanism_manager.update_port_precommit(mech_context) # TODO(apech) - handle errors raised by update_port, potentially # by re-calling update_port with the previous attributes. For # now the error is propogated to the caller, which is expected to # either undo/retry the operation or delete the resource. self.mechanism_manager.update_port_postcommit(mech_context) need_port_update_notify |= self.is_security_group_member_updated( context, original_port, updated_port) if original_port['admin_state_up'] != updated_port['admin_state_up']: need_port_update_notify = True bound_port = self._bind_port_if_needed( mech_context, allow_notify=True, need_notify=need_port_update_notify) return bound_port._port def _process_dvr_port_binding(self, mech_context, context, attrs): binding = mech_context._binding port = mech_context.current if binding.vif_type != portbindings.VIF_TYPE_UNBOUND: binding.vif_details = '' binding.vif_type = portbindings.VIF_TYPE_UNBOUND binding.driver = None binding.segment = None binding.host = '' self._update_port_dict_binding(port, binding) binding.host = attrs and attrs.get(portbindings.HOST_ID) def update_dvr_port_binding(self, context, id, port): attrs = port['port'] host = attrs and attrs.get(portbindings.HOST_ID) host_set = attributes.is_attr_set(host) if not host_set: LOG.error(_("No Host supplied to bind DVR Port %s"), id) return session = context.session binding = db.get_dvr_port_binding_by_host(session, id, host) if (not binding or binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED): with session.begin(subtransactions=True): if not binding: binding = db.ensure_dvr_port_binding( session, id, host, router_id=attrs['device_id']) orig_port = super(Ml2Plugin, self).get_port(context, id) network = self.get_network(context, orig_port['network_id']) mech_context = driver_context.DvrPortContext(self, context, orig_port, network, binding, original_port=orig_port) self._process_dvr_port_binding(mech_context, context, attrs) self.mechanism_manager.bind_port(mech_context) # Now try to commit result of attempting to bind the port. 
self._commit_dvr_port_binding(mech_context._plugin_context, orig_port['id'], host, mech_context) def _commit_dvr_port_binding(self, plugin_context, port_id, host, mech_context): session = plugin_context.session new_binding = mech_context._binding with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): # Get the current port state and build a new PortContext # reflecting this state as original state for subsequent # mechanism driver update_port_*commit() calls. cur_binding = db.get_dvr_port_binding_by_host(session, port_id, host) # Commit our binding results only if port has not been # successfully bound concurrently by another thread or # process and no binding inputs have been changed. commit = ((cur_binding.vif_type in [portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED]) and new_binding.host == cur_binding.host and new_binding.vnic_type == cur_binding.vnic_type and new_binding.profile == cur_binding.profile) if commit: # Update the port's binding state with our binding # results. cur_binding.vif_type = new_binding.vif_type cur_binding.vif_details = new_binding.vif_details cur_binding.driver = new_binding.driver cur_binding.segment = new_binding.segment if cur_binding.profile != new_binding.profile: cur_binding.profile = new_binding.profile def delete_port(self, context, id, l3_port_check=True): LOG.debug(_("Deleting port %s"), id) removed_routers = [] l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) is_dvr_enabled = utils.is_extension_supported( l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS) if l3plugin and l3_port_check: l3plugin.prevent_l3_port_deletion(context, id) session = context.session # REVISIT: Serialize this operation with a semaphore to # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock wait # timeout' errors. 
with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): port_db, binding = db.get_locked_port_and_binding(session, id) if not port_db: # the port existed when l3plugin.prevent_l3_port_deletion # was called but now is already gone LOG.debug(_("The port '%s' was deleted"), id) return port = self._make_port_dict(port_db) network = self.get_network(context, port['network_id']) mech_context = None if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: bindings = db.get_dvr_port_bindings(context.session, id) for bind in bindings: mech_context = driver_context.DvrPortContext( self, context, port, network, bind) self.mechanism_manager.delete_port_precommit(mech_context) else: mech_context = driver_context.PortContext(self, context, port, network, binding) if "compute:" in port['device_owner'] and is_dvr_enabled: router_info = l3plugin.dvr_deletens_if_no_vm(context, id) removed_routers += router_info self.mechanism_manager.delete_port_precommit(mech_context) self._delete_port_security_group_bindings(context, id) if l3plugin: router_ids = l3plugin.disassociate_floatingips( context, id, do_notify=False) if is_dvr_enabled: l3plugin.dvr_vmarp_table_update(context, id, "del") LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s" % {"port_id": id, "owner": port['device_owner']}) super(Ml2Plugin, self).delete_port(context, id) # now that we've left db transaction, we are safe to notify if l3plugin: l3plugin.notify_routers_updated(context, router_ids) for router in removed_routers: l3plugin.remove_router_from_l3_agent( context, router['agent_id'], router['router_id']) try: # for both normal and DVR Interface ports, only one invocation of # delete_port_postcommit. We use gather/scatter technique for DVR # interface ports, where the bindings are gathered in # delete_port_precommit() call earlier and scattered as l2pop # rules to cloud nodes in delete_port_postcommit() here if mech_context: self.mechanism_manager.delete_port_postcommit(mech_context) except ml2_exc.MechanismDriverError: # TODO(apech) - One or more mechanism driver failed to # delete the port. Ideally we'd notify the caller of the # fact that an error occurred. LOG.error(_("mechanism_manager.delete_port_postcommit failed for " "port %s"), id) self.notify_security_groups_member_updated(context, port) def get_bound_port_context(self, plugin_context, port_id, host=None): session = plugin_context.session with session.begin(subtransactions=True): try: port_db = (session.query(models_v2.Port). enable_eagerloads(False). filter(models_v2.Port.id.startswith(port_id)). one()) except sa_exc.NoResultFound: return except exc.MultipleResultsFound: LOG.error(_("Multiple ports have port_id starting with %s"), port_id) return port = self._make_port_dict(port_db) network = self.get_network(plugin_context, port['network_id']) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: binding = db.get_dvr_port_binding_by_host( session, port['id'], host) if not binding: LOG.error(_("Binding info for DVR port %s not found"), port_id) return None port_context = driver_context.DvrPortContext( self, plugin_context, port, network, binding) else: port_context = driver_context.PortContext( self, plugin_context, port, network, port_db.port_binding) return self._bind_port_if_needed(port_context) def update_port_status(self, context, port_id, status, host=None): """ Returns port_id (non-truncated uuid) if the port exists. Otherwise returns None. 
""" updated = False session = context.session # REVISIT: Serialize this operation with a semaphore to # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock wait # timeout' errors. with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): port = db.get_port(session, port_id) if not port: LOG.warning(_("Port %(port)s updated up by agent not found"), {'port': port_id}) return None if (port.status != status and port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE): original_port = self._make_port_dict(port) port.status = status updated_port = self._make_port_dict(port) network = self.get_network(context, original_port['network_id']) mech_context = driver_context.PortContext( self, context, updated_port, network, port.port_binding, original_port=original_port) self.mechanism_manager.update_port_precommit(mech_context) updated = True elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: binding = db.get_dvr_port_binding_by_host( session, port['id'], host) if not binding: return binding['status'] = status binding.update(binding) updated = True if (updated and port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE): with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): port = db.get_port(session, port_id) if not port: LOG.warning(_("Port %s not found during update"), port_id) return original_port = self._make_port_dict(port) network = self.get_network(context, original_port['network_id']) port.status = db.generate_dvr_port_status(session, port['id']) updated_port = self._make_port_dict(port) mech_context = (driver_context.DvrPortContext( self, context, updated_port, network, binding, original_port=original_port)) self.mechanism_manager.update_port_precommit(mech_context) if updated: self.mechanism_manager.update_port_postcommit(mech_context) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: db.delete_dvr_port_binding_if_stale(session, binding) return port['id'] def port_bound_to_host(self, context, port_id, host): port = db.get_port(context.session, port_id) if not port: LOG.debug("No Port match for: %s", port_id) return False if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: bindings = db.get_dvr_port_bindings(context.session, port_id) for b in bindings: if b.host == host: return True LOG.debug("No binding found for DVR port %s", port['id']) return False else: port_host = db.get_port_binding_host(port_id) return (port_host == host) def get_port_from_device(self, device): port_id = self._device_to_port_id(device) port = db.get_port_and_sgs(port_id) if port: port['device'] = device return port def _device_to_port_id(self, device): # REVISIT(rkukura): Consider calling into MechanismDrivers to # process device names, or having MechanismDrivers supply list # of device prefixes to strip. if device.startswith(TAP_DEVICE_PREFIX): return device[TAP_DEVICE_PREFIX_LENGTH:] else: # REVISIT(irenab): Consider calling into bound MD to # handle the get_device_details RPC, then remove the 'else' clause if not uuidutils.is_uuid_like(device): port = db.get_port_from_device_mac(device) if port: return port.id return device
"""Functions to set up models. Any model can in principle be used and fit to photometry and spectra using sdf, the models just need to be set up correctly. For the default models most of that is done here. Basic structure is that there are SpecModels (spectra) and PhotModels (photometry in filters). PhotModels are derived from SpecModels. To start from scratch, it may be beneficial to create a fresh folder in which models will go, since generation can take a long time. Do this by changing the file:model_root setting in .sdf.conf when model_setup is imported (but change it back once that is done). Models with reddening can be created from those without, e.g. with phoenix_reddened_spectra. In general, run setup_phot to regenerate PhotModels when filters have been added or modified. For generic details of how models are structured and how they work, see :py:mod:`sdf.model`. Models ------ ### Phoenix To generate phoenix model grids, first create resampled spectra with resample_phoenix_spectra, which will loop over metallicities to create models with one resolution. Two are needed in the standard setup, r=100 for SpecModels, and r=1000 from which the PhotModels are generated. Then setup_default_phoenix will put all of these together to make the full grids. ### Kurucz Run kurucz_spectra, which simply converts a model grid file into the format used by sdf. ### Blackbodies Run bb_spectra and/or modbb_spectra, which just call functions in sdf.model.SpecModel ### Simplified size distribution Run sdf.model.SpecModel.sd_spectra ### "Real grain" Run real_grain_spectra, which reads in a file that was previously created from IDL codes. """ import glob from os.path import exists,basename from itertools import product from multiprocessing import Pool from scipy.io import readsav import numpy as np import astropy.units as u import extinction as ext from . import convolve from . import model from . import spectrum from . import filter from . import utils from . import config as cfg c_micron = u.micron.to(u.Hz,equivalencies=u.spectral()) def setup_default_phoenix(): """Setup default phoenix models - T_{eff}, logg, and [M/H] grid. PhotModel is derived from high resolution spectra, and SpecModel is lower resolution because high resolution isn't necessary, and saves memory while running. """ # create the high resolution SpecModel phoenix_spectra(in_name_postfix='-r1000',name='phoenix_m',overwrite=True) # compute convolved photometry in all filters and write PhotModel specmodel2phot('phoenix_m',overwrite_filters=True,overwrite_model=True) # create the low resolution SpecModel phoenix_spectra(in_name_postfix='-r100',name='phoenix_m',overwrite=True) def setup_phot(overwrite_filters=False,overwrite_model=True, model_ignore=['phoenix-','phoenix+']): """Rederive convolved models and write combined PhotModel to disk. Parameters ---------- overwrite_filters : bool, optional Set to True to overwrite files for each bandpass. overwrite_model : bool, optional Set to True to overwrite the PhotModel that was generated. model_ignore : list of str, optional Skip models that include these strings. See Also -------- sdf.model_setup.specmodel2phot : Function called for each model. sdf.model_setup.convolve_specmodel : Function that does the heavy lifting. sdf.filter_info : Where filters are set up. Notes ----- This function needs to be run to incorporate new filters and/or propagate zero point offsets into colours/indices. 
""" for name in cfg.models['names']: do_it = True for ig in model_ignore: if ig in name: print("skipping {}".format(name)) do_it = False if do_it: specmodel2phot(name,overwrite_filters=overwrite_filters, overwrite_model=overwrite_model) def specmodel2phot(mname,overwrite_filters=False,overwrite_model=False): """Generate a PhotModel grid from SpecModel models.""" convolve_specmodel(mname,overwrite=overwrite_filters) m = model.PhotModel.read_convolved_models(mname) m.write_model(m.name,overwrite=overwrite_model) def convolve_specmodel(mname,overwrite=False): """Convolve a SpecModel to generate a set of ConvolvedModels. Parameters ---------- mname : string Name of the model to convolve. A SpecModel that is equal to or wider than the wavelength range required by the filters must exist in config['model_root']. overwrite : bool, optional Overwrite files for each filter. If they exist already they will simply be skipped. Thus, if set to False only convolved models that do not exist will be written, which would be the desired behaviour when new filters have been added (in sdf.filter_info). See Also -------- sdf.convolve : ConvolvedModel class sdf.filter_info : Where new filters are added. sdf.config : Where paths to models are specified. Notes ----- The range of parameters in the ConvolvedModels is the same as in the SpecModel used. """ m = model.SpecModel.read_model(mname) # flat list of all indices (cartesian product) i_list = () for i in range(len(m.parameters)): i_list += (list(range(m.param_shape()[i])),) i_list = [i for i in product(*i_list)] # grid with spectral dimension last fnujy_sr_roll = np.rollaxis(m.fnujy_sr,axis=0,start=len(m.fnujy_sr.shape)) # loop through them and create the ConvolvedModels and the files filters = filter.Filter.all fnujy_sr = np.zeros(len(i_list)) for fname in filters: outfile = cfg.model_loc[mname]+fname+'.fits' if exists(outfile) and overwrite == False: print("Skipping {}, file exists".format(fname)) else: print("Convolving filter {}".format(fname)) for i,ind in enumerate(i_list): s = spectrum.ModelSpectrum(nu_hz=c_micron/m.wavelength, fnujy_sr=fnujy_sr_roll[ind]) conv,cc = s.synthphot(fname) fnujy_sr[i] = conv if len(m.parameters) > 1: fnujy_sr_reshaped = np.reshape(fnujy_sr,m.param_shape()) else: fnujy_sr_reshaped = fnujy_sr cm = convolve.ConvolvedModel(name=mname,filter=fname, parameters=m.parameters, param_values=m.param_values, fnujy_sr=fnujy_sr_reshaped) cm.write_file(outfile,overwrite=overwrite) def kurucz_spectra(): """Generate a SpecModel grid of Castelli & Kurucz models.""" # ranges, keep all [M/H] trange = [3499,26001] lrange = [2.9,5.1] # Solar metallicity f00 = cfg.file['kurucz_models']+'fp00k2odfnew.pck' m = model.SpecModel.read_kurucz(f00) m = model.crop(m,'Teff',trange) m = model.crop(m,'logg',lrange) m.write_model('kurucz-0.0',overwrite=True) # range of metallicities m = model.append_parameter(m,'MH',0.0) fs = glob.glob(cfg.file['kurucz_models']+'f*k2odfnew.pck') for f in fs: if f == f00: continue mx = model.SpecModel.read_kurucz(f) mx = model.crop(mx,'Teff',trange) mx = model.crop(mx,'logg',lrange) fn = basename(f) mhx = float( str(fn[2]) + '.' + str(fn[3]) ) if fn[1] == 'm': mhx *= -1 mx = model.append_parameter(mx,'MH',mhx) m = model.concat(m,mx) m.write_model('kurucz_m',overwrite=True) def phoenix_spectra(in_name_postfix='',name='phoenix_m',overwrite=True): """Combine PHOENIX spectra with a range of [M/H] and write to disk. Parameters ---------- name: string, optional The name of the combined model. 
in_name_postfix: string, optional String that may appear on the end of the phoenix models we want to combine, in addition to "phoenix-X.X". These models must have already been created by phoenix_mh_spectra using name_postfix. overwrite: bool, optional Write the combined model to disk. This option would only need to be set to False for testing whether some models will actually combine OK. """ s00 = model.SpecModel.read_model('phoenix-0.0'+in_name_postfix) s00 = model.append_parameter(s00,'MH',0.0) for m in ['+0.5',-0.5,-1.0,-1.5,-2.0,-2.5,-3.0,-3.5,-4.0]: s = model.SpecModel.read_model('phoenix'+str(m)+in_name_postfix) s = model.append_parameter(s,'MH',float(m)) s00 = model.concat(s00,s) s00.write_model(name,overwrite=overwrite) def resample_phoenix_spectra(resolution=100,name_postfix=''): """Resample all phoenix spectra to common wavelength grid. This will take about 2.5h per metallicity for R=100 with 8 cores on a 5k iMac, or about 20h for R=1000. For reference, Spitzer's IRS instrument has low resolution and high resolution modules at R=60-130 and R=600. JSWT MIRI has low and medium resolution at R~100 and R~1550-3250. For IRS the low resolution mode was by far the most common. Thus, for spectra that will be resampled for these instruments R~100 is most sensible. """ for m in [0.5,0.0,-0.5,-1.0,-1.5,-2.0,-2.5,-3.0,-3.5,-4.0]: phoenix_mh_spectra(resolution=resolution,mh=m, name_postfix=name_postfix,overwrite=True) def phoenix_mh_spectra_one(par): """Read in and convolve one phoenix spectrum. This is a helper function so that phoenix_mh/cool_spectra can process a set of phoenix spectra more quickly. Parameters ---------- par : tuple Tuple of wavelengths to resample to, and phoenix file name. See Also -------- phoenix_mh_spectra phoenix_cool_spectra """ wave,f = par print("{}".format(f)) s = spectrum.ModelSpectrum.read_phoenix(f) kern = s.resample(wave) return s def phoenix_mh_spectra(resolution=100,mh=0.0,overwrite=False, processes=cfg.calc['cpu'],name_postfix=''): """Generate a SpecModel at some metallicity from phoenix spectra. For the BT-Settl-AGS2009 models Teff and logg range is hardcoded to 2600-29,000K and 2-4.5. This is a range where the grid is rectangular over metallicities and covers a wide range (+0.5 to -4 at 0.5 steps) . For these models there is also a much smaller set of "cool" models, which go from 2600K down to 400K, with a restricted and non-square range of logg and at Solar metallicity (most metallicities are present above 2000K, but -1.0 is missing). Most realistic set of models is probably logg=3.5 (with a few non-existent 3.5 ones replaced 4.0). Parameters ---------- resolution : float, optional Resolution of generated models. mh : float Metallicity of desired models. overwrite : bool, optional Force overwrite of extant models. processes : int, optional Number of simultaneous processes for calculation. name_postfix : str, optional String to append to model names. See Also -------- phoenix_cool_spectra """ # models from 2600-29000K, logg 2-4.5, at [M/H]=0.0, # sorted by temperature and then gravity if mh == 0.0: mhstr = '{:+}'.format(-np.abs(mh)) else: mhstr = '{:+}'.format(mh) name = 'phoenix'+mhstr+name_postfix # don't do the calculation if there will be a write error if overwrite == False: if name in cfg.model_loc.keys(): if exists(cfg.model_loc[name]+name+'_SpecModel.fits'): raise utils.SdfError("{} exists, will not overwrite". 
format(cfg.model_loc[name]+name+'.fits')) # the files for the main set of models fs = glob.glob(cfg.file['phoenix_models'] +'lte[0-2][0-9][0-9]-[2-4].?'+mhstr +'a?[0-9].[0-9].BT-Settl.7.bz2') fs.sort() # get the new wavelength grid, and ensure it goes to the max wave = np.power(10,np.arange(np.log10(cfg.models['min_wav_micron']), np.log10(cfg.models['max_wav_micron']), np.log10(1+1/float(resolution)))) if np.max(wave) != cfg.models['max_wav_micron']: wave = np.append(wave,cfg.models['max_wav_micron']) # read in and resample, in parallel pool = Pool(processes=processes) par = zip([wave for i in range(len(fs))],fs) spec = pool.map(phoenix_mh_spectra_one,par) pool.close() # sort spectra teff = [s.param_values['Teff'] for s in spec] logg = [s.param_values['logg'] for s in spec] teffarr = np.unique(teff) loggarr = np.unique(logg) s = model.SpecModel() s.name = spec[0].name s.wavelength = spec[0].wavelength s.parameters = ['Teff','logg'] s.param_values = {'Teff':teffarr, 'logg':loggarr} s.fnujy_sr = np.zeros((len(s.wavelength), len(teffarr), len(loggarr)),dtype=float) for i,sp in enumerate(spec): if not np.all( np.equal(s.wavelength,sp.wavelength) ): raise utils.SdfError("wavelength grids not the same \ in files {} and {}".format(fs[0],fs[i])) j = np.where(teff[i] == teffarr)[0][0] k = np.where(logg[i] == loggarr)[0][0] s.fnujy_sr[:,j,k] = sp.fnujy_sr s.write_model(name,overwrite=overwrite) return s def reddened_spectra(name='phoenix_m',overwrite=True): """Generate a SpecModel of spectra, with reddening.""" m = model.SpecModel.read_model(name) m0 = model.append_parameter(m,'A_V',0.0) for av in [0.3,0.6,0.9,1.2,1.5,1.8,2.1,2.4,2.7,3.0]: mx = m.copy() mx = model.append_parameter(m,'A_V',av) red = ext.apply(ext.odonnell94(m.wavelength*1e4,av,3.1),1) dim_array = np.ones((1,mx.fnujy_sr.ndim),int).ravel() dim_array[0] = -1 red_reshaped = red.reshape(dim_array) mx.fnujy_sr *= red_reshaped m0 = model.concat(m0,mx) m0.write_model(name+'_av',overwrite=overwrite) def phoenix_cool_spectra(resolution=100,overwrite=False, processes=cfg.calc['cpu'],name_postfix=''): """Generate a SpecModel of cool phoenix spectra. Use a subset of the BT-Settl-AGS2009 models for cool stars. .. todo:: yes, there alarming jumps in the spectra, these are present in the raw files and I'd guess where the opacities aren't available. Parameters ---------- resolution : float, optional Resolution of generated models. overwrite : bool, optional Force overwrite of extant models. processes : int, optional Number of simultaneous processes for calculation. name_postfix : str, optional String to append to model names. See Also -------- phoenix_mh_spectra """ name = 'phoenix_cool'+name_postfix # don't do the calculation if there will be a write error if overwrite == False: if name in cfg.model_loc.keys(): if exists(cfg.model_loc[name]+name+'_SpecModel.fits'): raise utils.SdfError("{} exists, will not overwrite". 
format(cfg.model_loc[name]+name+'.fits')) # the files for the main set of models fs = glob.glob(cfg.file['phoenix_cool_models']+'*.BT-Settl.7.bz2') fs.sort() # get the new wavelength grid, and ensure it goes to the max wave = np.power(10,np.arange(np.log10(cfg.models['min_wav_micron']), np.log10(cfg.models['max_wav_micron']), np.log10(1+1/float(resolution)))) if np.max(wave) != cfg.models['max_wav_micron']: wave = np.append(wave,cfg.models['max_wav_micron']) # read in and resample, in parallel pool = Pool(processes=processes) par = zip([wave for i in range(len(fs))],fs) spec = pool.map(phoenix_mh_spectra_one,par) pool.close() # sort spectra teff = [s.param_values['Teff'] for s in spec] teffarr = np.unique(teff) s = model.SpecModel() s.name = spec[0].name s.wavelength = spec[0].wavelength s.parameters = ['Teff'] s.param_values = {'Teff':teffarr} s.fnujy_sr = np.zeros((len(s.wavelength), len(teffarr)),dtype=float) for i,sp in enumerate(spec): if not np.all( np.equal(s.wavelength,sp.wavelength) ): raise utils.SdfError("wavelength grids not the same \ in files {} and {}".format(fs[0],fs[i])) j = np.where(teff[i] == teffarr)[0][0] s.fnujy_sr[:,j] = sp.fnujy_sr s.write_model(name,overwrite=overwrite) return s def koester_wd_spectra(resolution=100,overwrite=False,name_postfix=''): """Generate a SpecModel for Koester white dwarf spectra. Parameters ---------- resolution : float, optional Resolution of generated models. overwrite : bool, optional Force overwrite of extant models. name_postfix : str, optional String to append to model names. """ name = 'koester_wd' # don't do the calculation if there will be a write error if overwrite == False: if name in cfg.model_loc.keys(): if exists(cfg.model_loc[name]+name+'_SpecModel.fits'): raise utils.SdfError("{} exists, will not overwrite". 
format(cfg.model_loc[name]+name+'.fits')) # the files for the main set of models fs = glob.glob(cfg.file['koester_models']+'da*.dk.dat.txt') fs.sort() # get the new wavelength grid, and ensure it goes to the max wave = np.power(10,np.arange(np.log10(cfg.models['min_wav_micron']), np.log10(cfg.models['max_wav_micron']), np.log10(1+1/float(resolution)))) if np.max(wave) != cfg.models['max_wav_micron']: wave = np.append(wave,cfg.models['max_wav_micron']) # read in and resample spec = [] for f in fs: s = spectrum.ModelSpectrum.read_koester(f) s.resample(wave) spec.append(s) # sort spectra teff = [s.param_values['Teff'] for s in spec] logg = [s.param_values['logg'] for s in spec] teffarr = np.unique(teff) loggarr = np.unique(logg) s = model.SpecModel() s.name = spec[0].name s.wavelength = spec[0].wavelength s.parameters = ['Teff','logg'] s.param_values = {'Teff':teffarr, 'logg':loggarr} s.fnujy_sr = np.zeros((len(s.wavelength), len(teffarr), len(loggarr)),dtype=float) for i,sp in enumerate(spec): if not np.all( np.equal(s.wavelength,sp.wavelength) ): raise utils.SdfError("wavelength grids not the same \ in files {} and {}".format(fs[0],fs[i])) j = np.where(teff[i] == teffarr)[0][0] k = np.where(logg[i] == loggarr)[0][0] s.fnujy_sr[:,j,k] = sp.fnujy_sr s.write_model(name,overwrite=overwrite) return s def bb_spectra(): """Generate SpecModel grid of blackbody models.""" model.SpecModel.bb_disk_r(name='bb_disk_r', write=True,overwrite=True) model.SpecModel.bb_disk_r(name='bb_star', temperatures=10**np.arange(2.7,3.5,0.1), write=True,overwrite=True) def modbb_spectra(): """Generate SpecModel grid of modified blackbody models.""" model.SpecModel.modbb_disk_r(name='modbb_disk_r', write=True,overwrite=True) model.SpecModel.bb_disk_r(name='modbb_l210b1_disk_r', write=True, overwrite=True, lam0=210.0, beta=1.0) model.SpecModel.modbb_disk_dr(name='modbb_disk_dr', write=True,overwrite=True) def real_grain_spectra(file,overwrite=False): """Real dust grain models from IDL save file. Files are made by sdf/dust_spectra.pro, saving a grid of P(r) with dimensions [wav,temp,dmin,q]. When restored the dimensions are reversed. """ pr = readsav(file) s = model.SpecModel() s.name = 'amsil_r' s.wavelength = pr['wavs'] s.parameters = ['log_Temp','log_Dmin','q'] s.param_values = {'log_Temp': np.log10(pr['tbbs']), 'log_Dmin': np.log10(pr['dmins']), 'q': pr['qs']} s.fnujy_sr = np.rollaxis(np.rollaxis(np.rollaxis(pr['pr'],1),2),3) s.fnujy_sr[ s.fnujy_sr < cfg.tiny ] = cfg.tiny s.write_model(s.name,overwrite=overwrite)
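# A helper sketching the end-to-end sequence described in the module
# docstring: resample the raw phoenix spectra at the two resolutions that
# setup_default_phoenix expects (the '-r100' and '-r1000' postfixes mirror
# its in_name_postfix arguments), combine them into the full grids, then
# regenerate all PhotModels. Illustrative only; generation takes many hours
# (see resample_phoenix_spectra) and nothing calls this automatically.
def setup_all_phoenix():
    """Run the full phoenix model generation workflow from scratch."""
    resample_phoenix_spectra(resolution=100, name_postfix='-r100')
    resample_phoenix_spectra(resolution=1000, name_postfix='-r1000')
    setup_default_phoenix()
    setup_phot(overwrite_filters=True)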
"""n-body simulator to derive TDV+TTV diagrams of planet-moon configurations. Credit for part of the source is given to https://github.com/akuchling/50-examples/blob/master/gravity.rst Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License """ import numpy import math import matplotlib.pylab as plt from modified_turtle import Turtle from phys_const import * class Body(Turtle): """Subclass of Turtle representing a gravitationally-acting body""" name = 'Body' vx = vy = 0.0 # velocities in m/s px = py = 0.0 # positions in m def attraction(self, other): """(Body): (fx, fy) Returns the force exerted upon this body by the other body""" # Distance of the other body sx, sy = self.px, self.py ox, oy = other.px, other.py dx = (ox-sx) dy = (oy-sy) d = math.sqrt(dx**2 + dy**2) # Force f and direction to the body f = G * self.mass * other.mass / (d**2) theta = math.atan2(dy, dx) # direction of the force fx = math.cos(theta) * f fy = math.sin(theta) * f return fx, fy def loop(bodies, orbit_duration): """([Body]) Loops and updates the positions of all the provided bodies""" # Calculate the duration of our simulation: One full orbit of the outer moon seconds_per_day = 24*60*60 timesteps_per_day = 1000 timestep = seconds_per_day / timesteps_per_day total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day) #print total_steps, orbit_duration / 24 / 60 / 60 for body in bodies: body.penup() body.hideturtle() for step in range(total_steps): for body in bodies: if body.name == 'planet': # Add current position and velocity to our list tdv_list.append(body.vx) ttv_list.append(body.px) force = {} for body in bodies: # Add up all of the forces exerted on 'body' total_fx = total_fy = 0.0 for other in bodies: # Don't calculate the body's attraction to itself if body is other: continue fx, fy = body.attraction(other) total_fx += fx total_fy += fy # Record the total force exerted force[body] = (total_fx, total_fy) # Update velocities based upon on the force for body in bodies: fx, fy = force[body] body.vx += fx / body.mass * timestep body.vy += fy / body.mass * timestep # Update positions body.px += body.vx * timestep body.py += body.vy * timestep #body.goto(body.px*SCALE, body.py*SCALE) #body.dot(3) def run_sim(R_star, transit_duration, bodies): """Run 3-body sim and convert results to TTV + TDV values in [minutes]""" # Run 3-body sim for one full orbit of the outermost moon loop(bodies, orbit_duration) # Move resulting data from lists to numpy arrays ttv_array = numpy.array([]) ttv_array = ttv_list tdv_array = numpy.array([]) tdv_array = tdv_list # Zeropoint correction middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array) ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point) ttv_array = numpy.divide(ttv_array, 1000) # km/s # Compensate for barycenter offset of planet at start of simulation: planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon) stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array)) ttv_array = numpy.divide(ttv_array, stretch_factor) # Convert to time units, TTV ttv_array = numpy.divide(ttv_array, R_star) ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24) # minutes # Convert to time units, TDV oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60 # m/sec newspeed = oldspeed - numpy.amax(tdv_array) difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60 conversion_factor = difference / numpy.amax(tdv_array) tdv_array = numpy.multiply(tdv_array, conversion_factor) return ttv_array, tdv_array 
"""Main routine""" # Set variables and constants. Do not change these! G = 6.67428e-11 # Gravitational constant G SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim tdv_list = [] ttv_list = [] R_star = 6.96 * 10**5 # [km], solar radius transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0 print transit_duration planet = Body() planet.name = 'planet' planet.mass = M_jup #semimajor_axis = 1. * AU #[m] semimajor_axis = a_jup stellar_mass = M_sun radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3) # Define parameters firstmoon = Body() firstmoon.mass = M_gan firstmoon.px = 0.4218 * 10**9 secondmoon = Body() secondmoon.mass = M_gan secondmoon.px = 0.48945554 * 10**9 thirdmoon = Body() thirdmoon.mass = M_gan thirdmoon.px = 0.77696224 * 10**9 # Calculate start velocities firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px)) secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px)) thirdmoon.vy = math.sqrt(G * planet.mass * (2 / thirdmoon.px - 1 / thirdmoon.px)) planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass # Calculate planet displacement. This holds for circular orbits gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px gravity_thirdmoon = (thirdmoon.mass / planet.mass) * thirdmoon.px planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon + gravity_thirdmoon) # Use the outermost moon to calculate the length of one full orbit duration orbit_duration = math.sqrt((4 * math.pi**2 *thirdmoon.px ** 3) / (G * (thirdmoon.mass + planet.mass))) orbit_duration = orbit_duration * 2.005 # Run simulation. Make sure to add/remove the moons you want to simulate! ttv_array, tdv_array = run_sim( R_star, transit_duration, [planet, firstmoon, secondmoon, thirdmoon]) # Output information print 'TTV amplitude =', numpy.amax(ttv_array), \ '[min] = ', numpy.amax(ttv_array) * 60, '[sec]' print 'TDV amplitude =', numpy.amax(tdv_array), \ '[min] = ', numpy.amax(tdv_array) * 60, '[sec]' ax = plt.axes() plt.plot(ttv_array, tdv_array, color = 'k') plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']}) plt.rc('text', usetex=True) plt.tick_params(axis='both', which='major', labelsize = 16) plt.xlabel('transit timing variation [minutes]', fontsize = 16) plt.ylabel('transit duration variation [minutes]', fontsize = 16) ax.tick_params(direction='out') plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2]) plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2]) plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5) plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5) # Fix axes for comparison with eccentric moon plt.xlim(-0.15, +0.15) plt.ylim(-0.65, +0.65) plt.annotate(r"5:4:2", xy=(-0.145, +0.55), size=16) plt.savefig("fig_system_15.eps", bbox_inches = 'tight')
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'radtrack/ui/LaserInterface.ui' # # Created: Sat Apr 9 00:11:01 2016 # by: PyQt4 UI code generator 4.11.3 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_LaserInterface(object): def setupUi(self, LaserInterface): LaserInterface.setObjectName(_fromUtf8("LaserInterface")) LaserInterface.resize(639, 519) self.gridLayout_2 = QtGui.QGridLayout(LaserInterface) self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2")) self.verticalLayout_3 = QtGui.QVBoxLayout() self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3")) self.gridLayout = QtGui.QGridLayout() self.gridLayout.setObjectName(_fromUtf8("gridLayout")) self.noTitles = QtGui.QToolButton(LaserInterface) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.noTitles.sizePolicy().hasHeightForWidth()) self.noTitles.setSizePolicy(sizePolicy) self.noTitles.setStatusTip(_fromUtf8("")) self.noTitles.setPopupMode(QtGui.QToolButton.InstantPopup) self.noTitles.setObjectName(_fromUtf8("noTitles")) self.gridLayout.addWidget(self.noTitles, 0, 1, 1, 1) self.generatePulse = QtGui.QToolButton(LaserInterface) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.generatePulse.sizePolicy().hasHeightForWidth()) self.generatePulse.setSizePolicy(sizePolicy) self.generatePulse.setToolTip(_fromUtf8("")) self.generatePulse.setObjectName(_fromUtf8("generatePulse")) self.gridLayout.addWidget(self.generatePulse, 0, 0, 1, 1) self.externalFields = QtGui.QToolButton(LaserInterface) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.externalFields.sizePolicy().hasHeightForWidth()) self.externalFields.setSizePolicy(sizePolicy) self.externalFields.setObjectName(_fromUtf8("externalFields")) self.gridLayout.addWidget(self.externalFields, 1, 0, 1, 1) self.generateCoeffs = QtGui.QToolButton(LaserInterface) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.generateCoeffs.sizePolicy().hasHeightForWidth()) self.generateCoeffs.setSizePolicy(sizePolicy) self.generateCoeffs.setObjectName(_fromUtf8("generateCoeffs")) self.gridLayout.addWidget(self.generateCoeffs, 1, 1, 1, 1) self.verticalLayout_3.addLayout(self.gridLayout) self.horizontalLayout = QtGui.QHBoxLayout() self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout")) self.verticalLayout = QtGui.QVBoxLayout() self.verticalLayout.setObjectName(_fromUtf8("verticalLayout")) self.highLevelInputsLabel = QtGui.QLabel(LaserInterface) self.highLevelInputsLabel.setAlignment(QtCore.Qt.AlignCenter) self.highLevelInputsLabel.setObjectName(_fromUtf8("highLevelInputsLabel")) 
self.verticalLayout.addWidget(self.highLevelInputsLabel) self.topLevelParams = QtGui.QFormLayout() self.topLevelParams.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow) self.topLevelParams.setFormAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTop|QtCore.Qt.AlignTrailing) self.topLevelParams.setHorizontalSpacing(12) self.topLevelParams.setObjectName(_fromUtf8("topLevelParams")) self.wavelengthLabel = QtGui.QLabel(LaserInterface) self.wavelengthLabel.setObjectName(_fromUtf8("wavelengthLabel")) self.topLevelParams.setWidget(0, QtGui.QFormLayout.LabelRole, self.wavelengthLabel) self.waistSize = QtGui.QLineEdit(LaserInterface) self.waistSize.setToolTip(_fromUtf8("")) self.waistSize.setObjectName(_fromUtf8("waistSize")) self.topLevelParams.setWidget(1, QtGui.QFormLayout.FieldRole, self.waistSize) self.waistPositionLabel = QtGui.QLabel(LaserInterface) self.waistPositionLabel.setMinimumSize(QtCore.QSize(50, 0)) self.waistPositionLabel.setObjectName(_fromUtf8("waistPositionLabel")) self.topLevelParams.setWidget(2, QtGui.QFormLayout.LabelRole, self.waistPositionLabel) self.waistSizeLabel = QtGui.QLabel(LaserInterface) self.waistSizeLabel.setMinimumSize(QtCore.QSize(50, 0)) self.waistSizeLabel.setObjectName(_fromUtf8("waistSizeLabel")) self.topLevelParams.setWidget(1, QtGui.QFormLayout.LabelRole, self.waistSizeLabel) self.wavelength = QtGui.QLineEdit(LaserInterface) self.wavelength.setToolTip(_fromUtf8("")) self.wavelength.setObjectName(_fromUtf8("wavelength")) self.topLevelParams.setWidget(0, QtGui.QFormLayout.FieldRole, self.wavelength) self.waistPosition = QtGui.QLineEdit(LaserInterface) self.waistPosition.setToolTip(_fromUtf8("")) self.waistPosition.setObjectName(_fromUtf8("waistPosition")) self.topLevelParams.setWidget(2, QtGui.QFormLayout.FieldRole, self.waistPosition) self.verticalLayout.addLayout(self.topLevelParams) self.horizontalLayout.addLayout(self.verticalLayout) self.verticalLayout_2 = QtGui.QVBoxLayout() self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2")) self.unitsLabel = QtGui.QLabel(LaserInterface) self.unitsLabel.setAlignment(QtCore.Qt.AlignCenter) self.unitsLabel.setObjectName(_fromUtf8("unitsLabel")) self.verticalLayout_2.addWidget(self.unitsLabel) self.units = QtGui.QFormLayout() self.units.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow) self.units.setObjectName(_fromUtf8("units")) self.unitsXYLabel = QtGui.QLabel(LaserInterface) self.unitsXYLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.unitsXYLabel.setObjectName(_fromUtf8("unitsXYLabel")) self.units.setWidget(0, QtGui.QFormLayout.LabelRole, self.unitsXYLabel) self.unitsXY = QtGui.QLineEdit(LaserInterface) self.unitsXY.setToolTip(_fromUtf8("")) self.unitsXY.setObjectName(_fromUtf8("unitsXY")) self.units.setWidget(0, QtGui.QFormLayout.FieldRole, self.unitsXY) self.unitsZLabel = QtGui.QLabel(LaserInterface) self.unitsZLabel.setObjectName(_fromUtf8("unitsZLabel")) self.units.setWidget(1, QtGui.QFormLayout.LabelRole, self.unitsZLabel) self.unitsZ = QtGui.QLineEdit(LaserInterface) self.unitsZ.setToolTip(_fromUtf8("")) self.unitsZ.setDragEnabled(True) self.unitsZ.setObjectName(_fromUtf8("unitsZ")) self.units.setWidget(1, QtGui.QFormLayout.FieldRole, self.unitsZ) self.ticksLabel = QtGui.QLabel(LaserInterface) self.ticksLabel.setObjectName(_fromUtf8("ticksLabel")) self.units.setWidget(2, QtGui.QFormLayout.LabelRole, self.ticksLabel) self.numTicks = QtGui.QLineEdit(LaserInterface) self.numTicks.setObjectName(_fromUtf8("numTicks")) 
self.units.setWidget(2, QtGui.QFormLayout.FieldRole, self.numTicks) self.verticalLayout_2.addLayout(self.units) self.horizontalLayout.addLayout(self.verticalLayout_2) self.verticalLayout_3.addLayout(self.horizontalLayout) self.offsetLabel = QtGui.QLabel(LaserInterface) self.offsetLabel.setObjectName(_fromUtf8("offsetLabel")) self.verticalLayout_3.addWidget(self.offsetLabel) self.ghTable = QtGui.QTableWidget(LaserInterface) self.ghTable.setMaximumSize(QtCore.QSize(16777215, 16777215)) self.ghTable.setToolTip(_fromUtf8("")) self.ghTable.setObjectName(_fromUtf8("ghTable")) self.ghTable.setColumnCount(2) self.ghTable.setRowCount(100) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(0, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(1, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(2, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(3, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(4, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(5, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(6, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(7, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(8, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(9, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(10, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(11, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(12, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(13, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(14, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(15, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(16, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(17, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(18, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(19, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(20, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(21, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(22, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(23, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(24, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(25, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(26, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(27, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(28, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(29, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(30, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(31, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(32, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(33, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(34, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(35, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(36, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(37, item) 
item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(38, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(39, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(40, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(41, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(42, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(43, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(44, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(45, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(46, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(47, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(48, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(49, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(50, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(51, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(52, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(53, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(54, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(55, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(56, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(57, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(58, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(59, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(60, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(61, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(62, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(63, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(64, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(65, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(66, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(67, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(68, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(69, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(70, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(71, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(72, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(73, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(74, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(75, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(76, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(77, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(78, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(79, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(80, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(81, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(82, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(83, item) item = 
QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(84, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(85, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(86, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(87, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(88, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(89, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(90, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(91, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(92, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(93, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(94, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(95, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(96, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(97, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(98, item) item = QtGui.QTableWidgetItem() self.ghTable.setVerticalHeaderItem(99, item) item = QtGui.QTableWidgetItem() self.ghTable.setHorizontalHeaderItem(0, item) item = QtGui.QTableWidgetItem() self.ghTable.setHorizontalHeaderItem(1, item) self.verticalLayout_3.addWidget(self.ghTable) self.gridLayout_2.addLayout(self.verticalLayout_3, 0, 0, 1, 1) self.verticalLayout_4 = QtGui.QVBoxLayout() self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4")) self.zxPlot = matplotlibWidget(LaserInterface) self.zxPlot.setToolTip(_fromUtf8("")) self.zxPlot.setObjectName(_fromUtf8("zxPlot")) self.verticalLayout_4.addWidget(self.zxPlot) self.horizontalLayout_2 = QtGui.QHBoxLayout() self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2")) self.xyPlot = matplotlibWidget(LaserInterface) self.xyPlot.setToolTip(_fromUtf8("")) self.xyPlot.setObjectName(_fromUtf8("xyPlot")) self.horizontalLayout_2.addWidget(self.xyPlot) self.zyPlot = matplotlibWidget(LaserInterface) self.zyPlot.setToolTip(_fromUtf8("")) self.zyPlot.setObjectName(_fromUtf8("zyPlot")) self.horizontalLayout_2.addWidget(self.zyPlot) self.xyPlotExtFields = matplotlibWidget(LaserInterface) self.xyPlotExtFields.setToolTip(_fromUtf8("")) self.xyPlotExtFields.setObjectName(_fromUtf8("xyPlotExtFields")) self.horizontalLayout_2.addWidget(self.xyPlotExtFields) self.verticalLayout_4.addLayout(self.horizontalLayout_2) self.gridLayout_2.addLayout(self.verticalLayout_4, 0, 1, 1, 1) self.retranslateUi(LaserInterface) QtCore.QMetaObject.connectSlotsByName(LaserInterface) LaserInterface.setTabOrder(self.waistSize, self.generatePulse) LaserInterface.setTabOrder(self.generatePulse, self.unitsXY) LaserInterface.setTabOrder(self.unitsXY, self.unitsZ) LaserInterface.setTabOrder(self.unitsZ, self.ghTable) def retranslateUi(self, LaserInterface): LaserInterface.setWindowTitle(_translate("LaserInterface", "Form", None)) self.noTitles.setToolTip(_translate("LaserInterface", "Toggle plot titles on/off.", None)) self.noTitles.setText(_translate("LaserInterface", "Toggle Plot Titles", None)) self.generatePulse.setText(_translate("LaserInterface", "Generate Pulse", None)) self.externalFields.setToolTip(_translate("LaserInterface", "Apply external field to pulse", None)) self.externalFields.setText(_translate("LaserInterface", "External fields", None)) self.generateCoeffs.setToolTip(_translate("LaserInterface", "Determine Gauss-Hermite 
coeffiecients for the current pulse", None)) self.generateCoeffs.setText(_translate("LaserInterface", "Gauss-Hermite coefficients", None)) self.highLevelInputsLabel.setText(_translate("LaserInterface", "Top level params", None)) self.wavelengthLabel.setText(_translate("LaserInterface", "Wavelength [um]", None)) self.waistPositionLabel.setText(_translate("LaserInterface", "Waist Position [m]", None)) self.waistSizeLabel.setText(_translate("LaserInterface", "Waist radius [um]", None)) self.unitsLabel.setText(_translate("LaserInterface", "Plotting units", None)) self.unitsXYLabel.setText(_translate("LaserInterface", " x, y", None)) self.unitsZLabel.setText(_translate("LaserInterface", "<html><head/><body><p>&nbsp; &nbsp; z</p></body></html>", None)) self.ticksLabel.setText(_translate("LaserInterface", "Axis plot ticks", None)) self.numTicks.setToolTip(_translate("LaserInterface", "Maximum number of plot tick marks", None)) self.offsetLabel.setText(_translate("LaserInterface", "Gauss-Hermite coefficients:", None)) item = self.ghTable.verticalHeaderItem(0) item.setText(_translate("LaserInterface", "0", None)) item = self.ghTable.verticalHeaderItem(1) item.setText(_translate("LaserInterface", "1", None)) item = self.ghTable.verticalHeaderItem(2) item.setText(_translate("LaserInterface", "2", None)) item = self.ghTable.verticalHeaderItem(3) item.setText(_translate("LaserInterface", "3", None)) item = self.ghTable.verticalHeaderItem(4) item.setText(_translate("LaserInterface", "4", None)) item = self.ghTable.verticalHeaderItem(5) item.setText(_translate("LaserInterface", "5", None)) item = self.ghTable.verticalHeaderItem(6) item.setText(_translate("LaserInterface", "6", None)) item = self.ghTable.verticalHeaderItem(7) item.setText(_translate("LaserInterface", "7", None)) item = self.ghTable.verticalHeaderItem(8) item.setText(_translate("LaserInterface", "8", None)) item = self.ghTable.verticalHeaderItem(9) item.setText(_translate("LaserInterface", "9", None)) item = self.ghTable.verticalHeaderItem(10) item.setText(_translate("LaserInterface", "10", None)) item = self.ghTable.verticalHeaderItem(11) item.setText(_translate("LaserInterface", "11", None)) item = self.ghTable.verticalHeaderItem(12) item.setText(_translate("LaserInterface", "12", None)) item = self.ghTable.verticalHeaderItem(13) item.setText(_translate("LaserInterface", "13", None)) item = self.ghTable.verticalHeaderItem(14) item.setText(_translate("LaserInterface", "14", None)) item = self.ghTable.verticalHeaderItem(15) item.setText(_translate("LaserInterface", "15", None)) item = self.ghTable.verticalHeaderItem(16) item.setText(_translate("LaserInterface", "16", None)) item = self.ghTable.verticalHeaderItem(17) item.setText(_translate("LaserInterface", "17", None)) item = self.ghTable.verticalHeaderItem(18) item.setText(_translate("LaserInterface", "18", None)) item = self.ghTable.verticalHeaderItem(19) item.setText(_translate("LaserInterface", "19", None)) item = self.ghTable.verticalHeaderItem(20) item.setText(_translate("LaserInterface", "20", None)) item = self.ghTable.verticalHeaderItem(21) item.setText(_translate("LaserInterface", "21", None)) item = self.ghTable.verticalHeaderItem(22) item.setText(_translate("LaserInterface", "22", None)) item = self.ghTable.verticalHeaderItem(23) item.setText(_translate("LaserInterface", "23", None)) item = self.ghTable.verticalHeaderItem(24) item.setText(_translate("LaserInterface", "24", None)) item = self.ghTable.verticalHeaderItem(25) item.setText(_translate("LaserInterface", "25", None)) 
item = self.ghTable.verticalHeaderItem(26) item.setText(_translate("LaserInterface", "26", None)) item = self.ghTable.verticalHeaderItem(27) item.setText(_translate("LaserInterface", "27", None)) item = self.ghTable.verticalHeaderItem(28) item.setText(_translate("LaserInterface", "28", None)) item = self.ghTable.verticalHeaderItem(29) item.setText(_translate("LaserInterface", "29", None)) item = self.ghTable.verticalHeaderItem(30) item.setText(_translate("LaserInterface", "30", None)) item = self.ghTable.verticalHeaderItem(31) item.setText(_translate("LaserInterface", "31", None)) item = self.ghTable.verticalHeaderItem(32) item.setText(_translate("LaserInterface", "32", None)) item = self.ghTable.verticalHeaderItem(33) item.setText(_translate("LaserInterface", "33", None)) item = self.ghTable.verticalHeaderItem(34) item.setText(_translate("LaserInterface", "34", None)) item = self.ghTable.verticalHeaderItem(35) item.setText(_translate("LaserInterface", "35", None)) item = self.ghTable.verticalHeaderItem(36) item.setText(_translate("LaserInterface", "36", None)) item = self.ghTable.verticalHeaderItem(37) item.setText(_translate("LaserInterface", "37", None)) item = self.ghTable.verticalHeaderItem(38) item.setText(_translate("LaserInterface", "38", None)) item = self.ghTable.verticalHeaderItem(39) item.setText(_translate("LaserInterface", "39", None)) item = self.ghTable.verticalHeaderItem(40) item.setText(_translate("LaserInterface", "40", None)) item = self.ghTable.verticalHeaderItem(41) item.setText(_translate("LaserInterface", "41", None)) item = self.ghTable.verticalHeaderItem(42) item.setText(_translate("LaserInterface", "42", None)) item = self.ghTable.verticalHeaderItem(43) item.setText(_translate("LaserInterface", "43", None)) item = self.ghTable.verticalHeaderItem(44) item.setText(_translate("LaserInterface", "44", None)) item = self.ghTable.verticalHeaderItem(45) item.setText(_translate("LaserInterface", "45", None)) item = self.ghTable.verticalHeaderItem(46) item.setText(_translate("LaserInterface", "46", None)) item = self.ghTable.verticalHeaderItem(47) item.setText(_translate("LaserInterface", "47", None)) item = self.ghTable.verticalHeaderItem(48) item.setText(_translate("LaserInterface", "48", None)) item = self.ghTable.verticalHeaderItem(49) item.setText(_translate("LaserInterface", "49", None)) item = self.ghTable.verticalHeaderItem(50) item.setText(_translate("LaserInterface", "50", None)) item = self.ghTable.verticalHeaderItem(51) item.setText(_translate("LaserInterface", "51", None)) item = self.ghTable.verticalHeaderItem(52) item.setText(_translate("LaserInterface", "52", None)) item = self.ghTable.verticalHeaderItem(53) item.setText(_translate("LaserInterface", "53", None)) item = self.ghTable.verticalHeaderItem(54) item.setText(_translate("LaserInterface", "54", None)) item = self.ghTable.verticalHeaderItem(55) item.setText(_translate("LaserInterface", "55", None)) item = self.ghTable.verticalHeaderItem(56) item.setText(_translate("LaserInterface", "56", None)) item = self.ghTable.verticalHeaderItem(57) item.setText(_translate("LaserInterface", "57", None)) item = self.ghTable.verticalHeaderItem(58) item.setText(_translate("LaserInterface", "58", None)) item = self.ghTable.verticalHeaderItem(59) item.setText(_translate("LaserInterface", "59", None)) item = self.ghTable.verticalHeaderItem(60) item.setText(_translate("LaserInterface", "60", None)) item = self.ghTable.verticalHeaderItem(61) item.setText(_translate("LaserInterface", "61", None)) item = 
self.ghTable.verticalHeaderItem(62) item.setText(_translate("LaserInterface", "62", None)) item = self.ghTable.verticalHeaderItem(63) item.setText(_translate("LaserInterface", "63", None)) item = self.ghTable.verticalHeaderItem(64) item.setText(_translate("LaserInterface", "64", None)) item = self.ghTable.verticalHeaderItem(65) item.setText(_translate("LaserInterface", "65", None)) item = self.ghTable.verticalHeaderItem(66) item.setText(_translate("LaserInterface", "66", None)) item = self.ghTable.verticalHeaderItem(67) item.setText(_translate("LaserInterface", "67", None)) item = self.ghTable.verticalHeaderItem(68) item.setText(_translate("LaserInterface", "68", None)) item = self.ghTable.verticalHeaderItem(69) item.setText(_translate("LaserInterface", "69", None)) item = self.ghTable.verticalHeaderItem(70) item.setText(_translate("LaserInterface", "71", None)) item = self.ghTable.verticalHeaderItem(71) item.setText(_translate("LaserInterface", "72", None)) item = self.ghTable.verticalHeaderItem(72) item.setText(_translate("LaserInterface", "722", None)) item = self.ghTable.verticalHeaderItem(73) item.setText(_translate("LaserInterface", "73", None)) item = self.ghTable.verticalHeaderItem(74) item.setText(_translate("LaserInterface", "74", None)) item = self.ghTable.verticalHeaderItem(75) item.setText(_translate("LaserInterface", "75", None)) item = self.ghTable.verticalHeaderItem(76) item.setText(_translate("LaserInterface", "76", None)) item = self.ghTable.verticalHeaderItem(77) item.setText(_translate("LaserInterface", "77", None)) item = self.ghTable.verticalHeaderItem(78) item.setText(_translate("LaserInterface", "78", None)) item = self.ghTable.verticalHeaderItem(79) item.setText(_translate("LaserInterface", "79", None)) item = self.ghTable.verticalHeaderItem(80) item.setText(_translate("LaserInterface", "80", None)) item = self.ghTable.verticalHeaderItem(81) item.setText(_translate("LaserInterface", "81", None)) item = self.ghTable.verticalHeaderItem(82) item.setText(_translate("LaserInterface", "82", None)) item = self.ghTable.verticalHeaderItem(83) item.setText(_translate("LaserInterface", "83", None)) item = self.ghTable.verticalHeaderItem(84) item.setText(_translate("LaserInterface", "84", None)) item = self.ghTable.verticalHeaderItem(85) item.setText(_translate("LaserInterface", "85", None)) item = self.ghTable.verticalHeaderItem(86) item.setText(_translate("LaserInterface", "86", None)) item = self.ghTable.verticalHeaderItem(87) item.setText(_translate("LaserInterface", "87", None)) item = self.ghTable.verticalHeaderItem(88) item.setText(_translate("LaserInterface", "88", None)) item = self.ghTable.verticalHeaderItem(89) item.setText(_translate("LaserInterface", "89", None)) item = self.ghTable.verticalHeaderItem(90) item.setText(_translate("LaserInterface", "90", None)) item = self.ghTable.verticalHeaderItem(91) item.setText(_translate("LaserInterface", "91", None)) item = self.ghTable.verticalHeaderItem(92) item.setText(_translate("LaserInterface", "92", None)) item = self.ghTable.verticalHeaderItem(93) item.setText(_translate("LaserInterface", "93", None)) item = self.ghTable.verticalHeaderItem(94) item.setText(_translate("LaserInterface", "95", None)) item = self.ghTable.verticalHeaderItem(95) item.setText(_translate("LaserInterface", "9", None)) item = self.ghTable.verticalHeaderItem(96) item.setText(_translate("LaserInterface", "96", None)) item = self.ghTable.verticalHeaderItem(97) item.setText(_translate("LaserInterface", "97", None)) item = 
self.ghTable.verticalHeaderItem(98) item.setText(_translate("LaserInterface", "98", None)) item = self.ghTable.verticalHeaderItem(99) item.setText(_translate("LaserInterface", "100", None)) item = self.ghTable.horizontalHeaderItem(0) item.setText(_translate("LaserInterface", "M (horizontal coeff\'s)", None)) item = self.ghTable.horizontalHeaderItem(1) item.setText(_translate("LaserInterface", "N ( vertical coeff\'s )", None)) from radtrack.ui.matplotlibwidget import matplotlibWidget
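# A generated Ui_* class like the one above is normally not edited by hand
# (regenerating from the .ui file overwrites this module); it is attached to
# a live QWidget at runtime instead. The guarded demo below is an
# illustrative sketch of that standard PyQt4 pattern; the wrapper class name
# is an assumption, not part of radtrack.
if __name__ == '__main__':
    import sys

    class _LaserInterfaceDemo(QtGui.QWidget):
        """Thin QWidget wrapper hosting the generated form."""
        def __init__(self, parent=None):
            super(_LaserInterfaceDemo, self).__init__(parent)
            self.ui = Ui_LaserInterface()
            self.ui.setupUi(self)  # builds every widget declared in setupUi()

    app = QtGui.QApplication(sys.argv)
    window = _LaserInterfaceDemo()
    window.show()
    sys.exit(app.exec_())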
#!/usr/bin/env python """ A fast minimal static website builder. Minimalsite generates web pages from a source file hierarchy. It supports markdown and textile syntax, but plain txt and html can be evenly used. Template files are python modules, providing huge flexibility and keeping the codebase tiny and simple. """ import os import re import sys import imp import time import codecs import argparse try: import markdown import textile except ImportError: pass __author__ = "Marco Squarcina" __email__ = "lavish@gmail.com" __license__ = "MIT" __version__ = "1.00" # class definitions class Page: """ Meta informations of a page. Attributes: src_pathname pathname of the source file dst_pathname pathname of the generated file src_file file name of the source file dst_file file name of the generated file name file name of the generated file without extension level depth level in the site hierarchy last_edit time.struct_time date of source file last edit """ def __init__(self, src_pathname, level): self.src_pathname = src_pathname self.dst_pathname = self._get_dst_pathname() self.src_file = os.path.basename(self.src_pathname) self.dst_file = os.path.basename(self.dst_pathname) self.name = self._get_name() self.level = level self.last_edit = time.localtime(os.path.getmtime(self.src_pathname)) def _get_dst_pathname(self): """Get destination pathname from source pathname.""" # replace extension dst_pathname = os.path.splitext(self.src_pathname) if dst_pathname[1]: dst_pathname = os.path.join(dst_pathname[0] + \ '.' + template.DST_EXT) dst_pathname = ''.join(dst_pathname) # change destination dir dst_pathname = '/'.join(template.DST.split('/') + dst_pathname.split('/')[len(template.SRC.split('/')):]) # remove index numbers for dirs and files return re.sub('\/\d+_', '/', dst_pathname) def _get_name(self): """Get page name from destination pathname.""" name = os.path.basename(self.dst_pathname) name = os.path.splitext(name)[0] return name.replace('_', ' ') def __str__(self): """Return a textual representation of the page.""" data = "<{}, {}, {}, {}, {}, {}>" return data.format(self.src_pathname, self.dst_pathname, self.src_file, self.dst_file, self.name, self.level) class TreeNode: """ Node of the site hierarchy tree structure. Attributes: page Page object parent parent node, None if current node is the root children list of children """ def __init__(self, page, parent = None): self.page = page self.parent = parent self.children = [] def build(self): """Create the site tree, representing the source file hierarchy.""" # check if src dir includes an index file if self.page.level == 0 \ and not has_index(template.SRC): die('Directory {} does not include a valid index file, aborting.'\ .format(template.SRC), 2) for file_name in os.listdir(self.page.src_pathname): pathname = os.path.join(self.page.src_pathname, file_name) # do not add nodes for links and files starting with '.' 
if os.path.islink(pathname) or file_name[0] == ".": continue # add nodes for files with an allowed extension elif os.path.isfile(pathname) and syntax(pathname): node = TreeNode(Page(pathname, self.page.level + 1), self) self.children.append(node) # add nodes for directories and go on building the tree elif os.path.isdir(pathname) and has_index(pathname): node = TreeNode(Page(pathname, self.page.level + 1), self) self.children.append(node) node.build() def write(self, margin = ''): """Write the generated site on the file system.""" # a directory if self.children: # create the destination dir, if possible try: os.makedirs(self.page.dst_pathname) except OSError: pass else: print(margin + self.page.dst_pathname) # recursively call write_tree against current node for child in self.children: child.write(margin + ' ') # a file else: print(margin + self.page.dst_pathname) self._write_page() def title(self): """Return the title for the current node.""" if self.page.name == 'index': page_name = self.parent.page.name else: page_name = self.page.name return template.SITE_NAME + ' | ' + page_name def menu(self): """Return the generated code for menu.""" menu_code = '<ul>\n' # build the 'parent page' menu entry entry = '\t<li><a href="{}">&crarr;</a></li>\n' if self.page.name != 'index': menu_code += entry.format("index." + template.DST_EXT) elif self.page.level > 1: menu_code += entry.format("../index." + template.DST_EXT) # build other entries for sibling in sorted(self.parent.children, key=lambda sibling: sibling.page.src_pathname): # and index page or a hidden file, no need to include them if sibling.page.dst_file.startswith("index.") \ or sibling.page.src_file in template.HIDDEN: continue # a page elif not sibling.children: menu_code += '\t<li><a href="{}"'.format(sibling.page.dst_file) # current page if self == sibling: menu_code += ' class="current"' menu_code += '>{}</a></li>\n'.format(sibling.page.name) # a directory else: menu_code += '\t<li><a href="{}/index.{}">{}</a></li>\n' \ .format(sibling.page.dst_file, template.DST_EXT, \ sibling.page.name) menu_code += "</ul>" return menu_code def path(self): """Return the generated code for breadcrumb navigation path.""" path = "" path_node = [] tmp_node = self while tmp_node: path_node.insert(0, tmp_node) tmp_node = tmp_node.parent # no need to display "index" in the path if path_node[-1].page.name == "index": path_node.pop() for i in range(len(path_node)): # last item, it could be current page or current dir if i == len(path_node) - 1: path += path_node[i].page.name # a parent page else: traversal = "../" * \ (self.page.level - path_node[i].page.level - 1) path += '<a href="{}index.{}">{}</a> {} ' \ .format(traversal, template.DST_EXT, path_node[i].page.name, template.PATH_SEPARATOR) return path def write_sitemap(self): """Write an XML sitemap to the file system.""" try: file_desc = open(template.SITEMAP, 'w') except IOError: die("Unable to open {} for writing.".format(template.SITEMAP)) file_desc.write('{}\n{}\n'.format( '<?xml version="1.0" encoding="UTF-8"?>', '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')) self._get_sitemap_entries(file_desc, template.URL + template.PREFIX) file_desc.write('</urlset>') file_desc.close() def _get_sitemap_entries(self, file_desc, prefix): """Recursively write the XML entries for sitemap.""" if self.children: for child in self.children: if not child.page.src_file in template.HIDDEN: if self.page.level: # pylint: disable=W0212 child._get_sitemap_entries(file_desc, prefix + self.page.dst_file + '/') 
else: # pylint: disable=W0212 child._get_sitemap_entries(file_desc, prefix) else: file_desc.write(" <url>\n <loc>{}</loc>\n <lastmod>{}</lastmod>\n </url>\n" \ .format(prefix + self.page.dst_file, time.strftime("%Y-%m-%d", self.page.last_edit))) def _write_page(self): """Write a single page on the file system.""" # open source file h_src_pathname = codecs.open(self.page.src_pathname, "r", "utf-8") src_content = h_src_pathname.read() h_src_pathname.close() # build page dst_content = template.header(self) if syntax(self.page.src_pathname) == "markdown" \ and "markdown" in template.SRC_EXT: dst_content += markdown.markdown(src_content) elif syntax(self.page.src_pathname) == "textile" \ and "textile" in template.SRC_EXT: dst_content += textile.textile(src_content) elif syntax(self.page.src_pathname) == "plain" \ and "plain" in template.SRC_EXT: dst_content += src_content dst_content += template.footer(self) dst_content = dst_content.replace("%%%TITLE%%%", self.title()) dst_content = dst_content.replace("%%%PATH%%%", self.path()) dst_content = dst_content.replace("%%%MENU%%%", self.menu()) dst_content = dst_content.replace("%%%VERSION%%%", __version__) # write destination file h_dst_pathname = codecs.open(self.page.dst_pathname, "w", "utf-8") h_dst_pathname.write(dst_content) h_dst_pathname.close() def __str__(self): """Return the entire tree structure.""" tree = " " * self.page.level + str(self.page) + "\n" for child in self.children: tree += str(child) return tree # function definitions def syntax(pathname): """Return the markup language used in the given pathname.""" for lang in list(template.SRC_EXT.keys()): if template.SRC_EXT[lang] == pathname.split('.')[-1]: return lang return None def has_index(pathname): """Check if there's an index file in the given directory pathname.""" for lang in list(template.SRC_EXT.keys()): if os.path.isfile(pathname + "/index." + template.SRC_EXT[lang]): return True return False def import_template(pathname): """Load the python module in the provided file name as a template.""" if not os.path.isfile(pathname): die("Template file does not exist. Aborting") (path, name) = os.path.split(pathname) (name, ext) = os.path.splitext(name) if ext == '.py' and name.endswith('_template'): (file_desc, pathname, data) = imp.find_module(name, [path]) return imp.load_module(name, file_desc, pathname, data) else: die("Invalid template file name. Valid templates must terminate with '_template.py'.") def check_template(): """Check mandatory variable/function definitions in the provided template.""" template_data = dir(template) required_data = ['DST', 'DST_EXT', 'HIDDEN', 'HOME', 'PATH_SEPARATOR', \ 'PREFIX', 'SITEMAP', 'SITE_NAME', 'SRC', 'SRC_EXT', 'URL', \ 'footer', 'header'] for data in required_data: if not data in template_data: die("Missing {} definition in template file. Aborting" \ .format(data)) def notice(msg): """Write a notice message to stdout.""" print("[*] {}".format(msg)) def die(msg, code=1): """Write an error message to stderr and exit.""" sys.stderr.write("[!] {}\n".format(msg)) sys.exit(code) def main(): """Main method.""" global template parser = argparse.ArgumentParser(description = \ 'Fast minimal static website builder') parser.add_argument('-t', '--template', type=str, required=True, \ help="specify a template file name. 
Valid templates must terminate with '_template.py'") parser.add_argument('-V', '--verbose', action='store_true', \ default=False, help='verbosely display site structure') parser.add_argument('-s', '--src', type=str, default=None, \ help='source dir, where the textual hierarchy resides') parser.add_argument('-d', '--dst', type=str, default=None, \ help='destination dir, where the html pages will be written') parser.add_argument('-m', '--sitemap', type=str, default=None, \ help='full path name for the XML sitemap') parser.add_argument('-v', '--version', action='version', \ version='%(prog)s-'+__version__) args = parser.parse_args() # load template template = import_template(args.template) # check template integrity check_template() # check markup modules for lang in ('markdown', 'textile'): if not lang in sys.modules: del template.SRC_EXT[lang] if args.verbose: print("[!] Disabling {} support, module not found".format(lang)) if not template.SRC_EXT: die("No modules for parsing files found. See README for requirements.") # check src and dst directories if args.src: template.SRC = args.src if args.dst: template.DST = args.dst # assign sitemap pathname if args.sitemap: template.SITEMAP = args.sitemap # fix trailing slashes template.SRC = os.path.abspath(template.SRC) template.DST = os.path.abspath(template.DST) for directory in [template.SRC, template.DST]: if not os.path.isdir(directory): die('Directory {} does not exist, aborting.'.format(directory), 2) # start writing pages notice('Processing files from {}'.format(template.SRC)) root = TreeNode(Page(template.SRC, 0)) root.page.name = template.HOME root.build() notice('Writing {} files into {}'.format(template.DST_EXT, template.DST)) root.write() # write sitemap if template.SITEMAP: notice('Writing sitemap to {}'.format(template.SITEMAP)) root.write_sitemap() if args.verbose: notice('Printing site structure') notice('values as: <src_pathname, dst_pathname, src_file, dst_file, name, level>') print(root) if __name__ == "__main__": main()
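# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original sources: a minimal template
# module for the builder above (it would need a file name like
# "example_template.py" to pass import_template()). Only the names are
# dictated by check_template() and by how main()/TreeNode use them; every
# concrete value below is a placeholder assumption.
# ---------------------------------------------------------------------------

SITE_NAME = 'Example Site'          # used by TreeNode.title()
URL = 'http://www.example.com'      # sitemap entries are URL + PREFIX + page
PREFIX = '/'
SRC = 'src'                         # source hierarchy (overridable with -s)
DST = 'dst'                         # destination hierarchy (overridable with -d)
SRC_EXT = {'markdown': 'md',        # language -> source extension, see syntax()
           'textile': 'textile',
           'plain': 'txt'}
DST_EXT = 'html'                    # extension of the generated pages
HOME = 'home'                       # display name of the root node
HIDDEN = ()                         # source files excluded from menus/sitemap
PATH_SEPARATOR = '&raquo;'          # used by TreeNode.path()
SITEMAP = 'dst/sitemap.xml'         # a falsy value skips sitemap generation

def header(node):
    """Opening HTML; %%%TITLE%%%, %%%MENU%%% etc. are filled in by _write_page()."""
    return '<html><head><title>%%%TITLE%%%</title></head><body>%%%MENU%%%'

def footer(node):
    """Closing HTML appended to every generated page."""
    return '</body></html>'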
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### """ Blender exporter for Three.js (ASCII JSON format). TODO - binary format """ import bpy import mathutils import shutil import os import os.path import math import operator import random # ##################################################### # Configuration # ##################################################### DEFAULTS = { "bgcolor" : [0, 0, 0], "bgalpha" : 1.0, "position" : [0, 0, 0], "rotation" : [-math.pi/2, 0, 0], "scale" : [1, 1, 1], "camera" : { "name" : "default_camera", "type" : "perspective", "near" : 1, "far" : 10000, "fov" : 60, "aspect": 1.333, "position" : [0, 0, 10], "target" : [0, 0, 0] }, "light" : { "name" : "default_light", "type" : "directional", "direction" : [0, 1, 1], "color" : [1, 1, 1], "intensity" : 0.8 } } # default colors for debugging (each material gets one distinct color): # white, red, green, blue, yellow, cyan, magenta COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee] # ##################################################### # Templates - scene # ##################################################### TEMPLATE_SCENE_ASCII = """\ { "metadata" : { "formatVersion" : 3, "sourceFile" : "%(fname)s", "generatedBy" : "Blender 2.60 Exporter", "objects" : %(nobjects)s, "geometries" : %(ngeometries)s, "materials" : %(nmaterials)s, "textures" : %(ntextures)s }, "type" : "scene", "urlBaseType" : %(basetype)s, %(sections)s "transform" : { "position" : %(position)s, "rotation" : %(rotation)s, "scale" : %(scale)s }, "defaults" : { "bgcolor" : %(bgcolor)s, "bgalpha" : %(bgalpha)f, "camera" : %(defcamera)s } } """ TEMPLATE_SECTION = """ "%s" : { %s }, """ TEMPLATE_OBJECT = """\ %(object_id)s : { "geometry" : %(geometry_id)s, "groups" : [ %(group_id)s ], "materials" : [ %(material_id)s ], "position" : %(position)s, "rotation" : %(rotation)s, "quaternion": %(quaternion)s, "scale" : %(scale)s, "visible" : %(visible)s, "castsShadow" : %(castsShadow)s, "meshCollider" : %(meshCollider)s, "trigger" : %(trigger)s }""" TEMPLATE_EMPTY = """\ %(object_id)s : { "groups" : [ %(group_id)s ], "position" : %(position)s, "rotation" : %(rotation)s, "quaternion": %(quaternion)s, "scale" : %(scale)s, "trigger" : %(trigger)s }""" TEMPLATE_GEOMETRY_LINK = """\ %(geometry_id)s : { "type" : "ascii_mesh", "url" : %(model_file)s }""" TEMPLATE_GEOMETRY_EMBED = """\ %(geometry_id)s : { "type" : "embedded_mesh", "id" : %(embed_id)s }""" TEMPLATE_TEXTURE = """\ %(texture_id)s : { "url": %(texture_file)s%(extras)s }""" TEMPLATE_MATERIAL_SCENE = """\ %(material_id)s : { "type": %(type)s, "parameters": { %(parameters)s } }""" TEMPLATE_CAMERA_PERSPECTIVE = """\ %(camera_id)s : { "type" : "perspective", "fov" : %(fov)f, "aspect": %(aspect)f, "near" : %(near)f, "far" : %(far)f, "position": %(position)s, "target" : 
%(target)s }""" TEMPLATE_CAMERA_ORTHO = """\ %(camera_id)s: { "type" : "ortho", "left" : %(left)f, "right" : %(right)f, "top" : %(top)f, "bottom": %(bottom)f, "near" : %(near)f, "far" : %(far)f, "position": %(position)s, "target" : %(target)s }""" TEMPLATE_LIGHT_DIRECTIONAL = """\ %(light_id)s: { "type" : "directional", "direction" : %(direction)s, "color" : %(color)d, "intensity" : %(intensity).2f }""" TEMPLATE_LIGHT_POINT = """\ %(light_id)s: { "type" : "point", "position" : %(position)s, "color" : %(color)d, "intensity" : %(intensity).3f }""" TEMPLATE_VEC4 = '[ %f, %f, %f, %f ]' TEMPLATE_VEC3 = '[ %f, %f, %f ]' TEMPLATE_VEC2 = '[ %f, %f ]' TEMPLATE_STRING = '"%s"' TEMPLATE_HEX = "0x%06x" # ##################################################### # Templates - model # ##################################################### TEMPLATE_FILE_ASCII = """\ { "metadata" : { "formatVersion" : 3, "generatedBy" : "Blender 2.60 Exporter", "vertices" : %(nvertex)d, "faces" : %(nface)d, "normals" : %(nnormal)d, "colors" : %(ncolor)d, "uvs" : %(nuv)d, "materials" : %(nmaterial)d, "morphTargets" : %(nmorphTarget)d }, %(model)s } """ TEMPLATE_MODEL_ASCII = """\ "scale" : %(scale)f, "materials": [%(materials)s], "vertices": [%(vertices)s], "morphTargets": [%(morphTargets)s], "normals": [%(normals)s], "colors": [%(colors)s], "uvs": [[%(uvs)s]], "faces": [%(faces)s] """ TEMPLATE_VERTEX = "%f,%f,%f" TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d" TEMPLATE_N = "%f,%f,%f" TEMPLATE_UV = "%f,%f" TEMPLATE_C = "%d" # ##################################################### # Utils # ##################################################### def veckey3(x,y,z): return round(x, 6), round(y, 6), round(z, 6) def veckey3d(v): return veckey3(v.x, v.y, v.z) def veckey2d(v): return round(v[0], 6), round(v[1], 6) def get_normal_indices(v, normals, mesh): n = [] mv = mesh.vertices for i in v: normal = mv[i].normal key = veckey3d(normal) n.append( normals[key] ) return n def get_uv_indices(face_index, uvs, mesh): uv = [] uv_layer = mesh.uv_textures.active.data for i in uv_layer[face_index].uv: uv.append( uvs[veckey2d(i)] ) return uv def get_color_indices(face_index, colors, mesh): c = [] color_layer = mesh.vertex_colors.active.data face_colors = color_layer[face_index] face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4 for i in face_colors: c.append( colors[hexcolor(i)] ) return c def rgb2int(rgb): color = (int(rgb[0]*255) << 16) + (int(rgb[1]*255) << 8) + int(rgb[2]*255); return color # ##################################################### # Utils - files # ##################################################### def write_file(fname, content): out = open(fname, "w") out.write(content) out.close() def ensure_folder_exist(foldername): """Create folder (with whole path) if it doesn't exist yet.""" if not os.access(foldername, os.R_OK|os.W_OK|os.X_OK): os.makedirs(foldername) def ensure_extension(filepath, extension): if not filepath.lower().endswith(extension): filepath += extension return filepath def generate_mesh_filename(meshname, filepath): normpath = os.path.normpath(filepath) path, ext = os.path.splitext(normpath) return "%s.%s%s" % (path, meshname, ext) # ##################################################### # Utils - alignment # ##################################################### def bbox(vertices): """Compute bounding box of vertex array. 
""" if len(vertices)>0: minx = maxx = vertices[0].co.x miny = maxy = vertices[0].co.y minz = maxz = vertices[0].co.z for v in vertices[1:]: if v.co.x < minx: minx = v.co.x elif v.co.x > maxx: maxx = v.co.x if v.co.y < miny: miny = v.co.y elif v.co.y > maxy: maxy = v.co.y if v.co.z < minz: minz = v.co.z elif v.co.z > maxz: maxz = v.co.z return { 'x':[minx,maxx], 'y':[miny,maxy], 'z':[minz,maxz] } else: return { 'x':[0,0], 'y':[0,0], 'z':[0,0] } def translate(vertices, t): """Translate array of vertices by vector t. """ for i in range(len(vertices)): vertices[i].co.x += t[0] vertices[i].co.y += t[1] vertices[i].co.z += t[2] def center(vertices): """Center model (middle of bounding box). """ bb = bbox(vertices) cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0 cy = bb['y'][0] + (bb['y'][1] - bb['y'][0])/2.0 cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0 translate(vertices, [-cx,-cy,-cz]) def top(vertices): """Align top of the model with the floor (Y-axis) and center it around X and Z. """ bb = bbox(vertices) cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0 cy = bb['y'][1] cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0 translate(vertices, [-cx,-cy,-cz]) def bottom(vertices): """Align bottom of the model with the floor (Y-axis) and center it around X and Z. """ bb = bbox(vertices) cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0 cy = bb['y'][0] cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0 translate(vertices, [-cx,-cy,-cz]) # ##################################################### # Elements rendering # ##################################################### def hexcolor(c): return ( int(c[0] * 255) << 16 ) + ( int(c[1] * 255) << 8 ) + int(c[2] * 255) def generate_vertices(vertices, option_vertices_truncate, option_vertices): if not option_vertices: return "" return ",".join(generate_vertex(v, option_vertices_truncate) for v in vertices) def generate_vertex(v, option_vertices_truncate): if not option_vertices_truncate: return TEMPLATE_VERTEX % (v.co.x, v.co.y, v.co.z) else: return TEMPLATE_VERTEX_TRUNCATE % (v.co.x, v.co.y, v.co.z) def generate_normal(n): return TEMPLATE_N % (n[0], n[1], n[2]) def generate_vertex_color(c): return TEMPLATE_C % c def generate_uv(uv): return TEMPLATE_UV % (uv[0], 1.0 - uv[1]) # ##################################################### # Model exporter - faces # ##################################################### def setBit(value, position, on): if on: mask = 1 << position return (value | mask) else: mask = ~(1 << position) return (value & mask) def generate_faces(normals, uvs, colors, meshes, option_normals, option_colors, option_uv_coords, option_materials, flipyz, option_faces): if not option_faces: return "", 0 vertex_offset = 0 material_offset = 0 chunks = [] for mesh, object in meshes: faceUV = (len(mesh.uv_textures) > 0) vertexUV = (len(mesh.sticky) > 0) vertexColors = len(mesh.vertex_colors) > 0 mesh_colors = option_colors and vertexColors mesh_uvs = option_uv_coords and (faceUV or vertexUV) if faceUV or vertexUV: active_uv_layer = mesh.uv_textures.active if not active_uv_layer: mesh_extract_uvs = False if vertexColors: active_col_layer = mesh.vertex_colors.active if not active_col_layer: mesh_extract_colors = False for i, f in enumerate(mesh.faces): face = generate_face(f, i, normals, uvs, colors, mesh, option_normals, mesh_colors, mesh_uvs, option_materials, flipyz, vertex_offset, material_offset) chunks.append(face) vertex_offset += len(mesh.vertices) material_count = len(mesh.materials) if material_count == 0: material_count = 1 material_offset += 
material_count return ",".join(chunks), len(chunks) def generate_face(f, faceIndex, normals, uvs, colors, mesh, option_normals, option_colors, option_uv_coords, option_materials, flipyz, vertex_offset, material_offset): isTriangle = ( len(f.vertices) == 3 ) if isTriangle: nVertices = 3 else: nVertices = 4 hasMaterial = option_materials hasFaceUvs = False # not supported in Blender hasFaceVertexUvs = option_uv_coords hasFaceNormals = False # don't export any face normals (as they are computed in engine) hasFaceVertexNormals = option_normals hasFaceColors = False # not supported in Blender hasFaceVertexColors = option_colors faceType = 0 faceType = setBit(faceType, 0, not isTriangle) faceType = setBit(faceType, 1, hasMaterial) faceType = setBit(faceType, 2, hasFaceUvs) faceType = setBit(faceType, 3, hasFaceVertexUvs) faceType = setBit(faceType, 4, hasFaceNormals) faceType = setBit(faceType, 5, hasFaceVertexNormals) faceType = setBit(faceType, 6, hasFaceColors) faceType = setBit(faceType, 7, hasFaceVertexColors) faceData = [] # order is important, must match order in JSONLoader # face type # vertex indices # material index # face uvs index # face vertex uvs indices # face color index # face vertex colors indices faceData.append(faceType) # must clamp in case on polygons bigger than quads for i in range(nVertices): index = f.vertices[i] + vertex_offset faceData.append(index) if hasMaterial: index = f.material_index + material_offset faceData.append( index ) if hasFaceVertexUvs: uv = get_uv_indices(faceIndex, uvs, mesh) for i in range(nVertices): index = uv[i] faceData.append(index) if hasFaceVertexNormals: n = get_normal_indices(f.vertices, normals, mesh) for i in range(nVertices): index = n[i] faceData.append(index) if hasFaceVertexColors: c = get_color_indices(faceIndex, colors, mesh) for i in range(nVertices): index = c[i] faceData.append(index) return ",".join( map(str, faceData) ) # ##################################################### # Model exporter - normals # ##################################################### def extract_vertex_normals(mesh, normals, count): for f in mesh.faces: for v in f.vertices: normal = mesh.vertices[v].normal key = veckey3d(normal) if key not in normals: normals[key] = count count += 1 return count def generate_normals(normals, option_normals): if not option_normals: return "" chunks = [] for key, index in sorted(normals.items(), key = operator.itemgetter(1)): chunks.append(key) return ",".join(generate_normal(n) for n in chunks) # ##################################################### # Model exporter - vertex colors # ##################################################### def extract_vertex_colors(mesh, colors, count): color_layer = mesh.vertex_colors.active.data for face_index, face in enumerate(mesh.faces): face_colors = color_layer[face_index] face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4 for c in face_colors: key = hexcolor(c) if key not in colors: colors[key] = count count += 1 return count def generate_vertex_colors(colors, option_colors): if not option_colors: return "" chunks = [] for key, index in sorted(colors.items(), key=operator.itemgetter(1)): chunks.append(key) return ",".join(generate_vertex_color(c) for c in chunks) # ##################################################### # Model exporter - UVs # ##################################################### def extract_uvs(mesh, uvs, count): uv_layer = mesh.uv_textures.active.data for face_index, face in enumerate(mesh.faces): for uv_index, uv in 
enumerate(uv_layer[face_index].uv): key = veckey2d(uv) if key not in uvs: uvs[key] = count count += 1 return count def generate_uvs(uvs, option_uv_coords): if not option_uv_coords: return "" chunks = [] for key, index in sorted(uvs.items(), key=operator.itemgetter(1)): chunks.append(key) return ",".join(generate_uv(n) for n in chunks) # ##################################################### # Model exporter - materials # ##################################################### def generate_color(i): """Generate hex color corresponding to integer. Colors should have well defined ordering. First N colors are hardcoded, then colors are random (must seed random number generator with deterministic value before getting colors). """ if i < len(COLORS): #return "0x%06x" % COLORS[i] return COLORS[i] else: #return "0x%06x" % int(0xffffff * random.random()) return int(0xffffff * random.random()) def generate_mtl(materials): """Generate dummy materials. """ mtl = {} for m in materials: index = materials[m] mtl[m] = { "DbgName": m, "DbgIndex": index, "DbgColor": generate_color(index), "vertexColors" : False } return mtl def value2string(v): if type(v) == str and v[0:2] != "0x": return '"%s"' % v elif type(v) == bool: return str(v).lower() elif type(v) == list: return "[%s]" % (", ".join(value2string(x) for x in v)) return str(v) def generate_materials(mtl, materials, draw_type): """Generate JS array of materials objects """ mtl_array = [] for m in mtl: index = materials[m] # add debug information # materials should be sorted according to how # they appeared in OBJ file (for the first time) # this index is identifier used in face definitions mtl[m]['DbgName'] = m mtl[m]['DbgIndex'] = index mtl[m]['DbgColor'] = generate_color(index) if draw_type in [ "BOUNDS", "WIRE" ]: mtl[m]['wireframe'] = True mtl[m]['DbgColor'] = 0xff0000 mtl_raw = ",\n".join(['\t"%s" : %s' % (n, value2string(v)) for n,v in sorted(mtl[m].items())]) mtl_string = "\t{\n%s\n\t}" % mtl_raw mtl_array.append([index, mtl_string]) return ",\n\n".join([m for i,m in sorted(mtl_array)]), len(mtl_array) def extract_materials(mesh, scene, option_colors, option_copy_textures, filepath): world = scene.world materials = {} for m in mesh.materials: if m: materials[m.name] = {} material = materials[m.name] material['colorDiffuse'] = [m.diffuse_intensity * m.diffuse_color[0], m.diffuse_intensity * m.diffuse_color[1], m.diffuse_intensity * m.diffuse_color[2]] material['colorSpecular'] = [m.specular_intensity * m.specular_color[0], m.specular_intensity * m.specular_color[1], m.specular_intensity * m.specular_color[2]] world_ambient_color = [0, 0, 0] if world: world_ambient_color = world.ambient_color material['colorAmbient'] = [m.ambient * world_ambient_color[0], m.ambient * world_ambient_color[1], m.ambient * world_ambient_color[2]] material['transparency'] = m.alpha # not sure about mapping values to Blinn-Phong shader # Blender uses INT from [1, 511] with default 0 # http://www.blender.org/documentation/blender_python_api_2_54_0/bpy.types.Material.html#bpy.types.Material.specular_hardness material["specularCoef"] = m.specular_hardness textures = guess_material_textures(m) handle_texture('diffuse', textures, material, filepath, option_copy_textures) handle_texture('light', textures, material, filepath, option_copy_textures) handle_texture('normal', textures, material, filepath, option_copy_textures) handle_texture('specular', textures, material, filepath, option_copy_textures) material["vertexColors"] = m.THREE_useVertexColors and option_colors # can't 
really use this reliably to tell apart Phong from Lambert # as Blender defaults to non-zero specular color #if m.specular_intensity > 0.0 and (m.specular_color[0] > 0 or m.specular_color[1] > 0 or m.specular_color[2] > 0): # material['shading'] = "Phong" #else: # material['shading'] = "Lambert" if textures['normal']: material['shading'] = "Phong" else: material['shading'] = m.THREE_materialType return materials def generate_materials_string(mesh, scene, option_colors, draw_type, option_copy_textures, filepath, offset): random.seed(42) # to get well defined color order for debug materials materials = {} if mesh.materials: for i, m in enumerate(mesh.materials): mat_id = i + offset if m: materials[m.name] = mat_id else: materials["undefined_dummy_%0d" % mat_id] = mat_id if not materials: materials = { 'default': 0 } # default dummy materials mtl = generate_mtl(materials) # extract real materials from the mesh mtl.update(extract_materials(mesh, scene, option_colors, option_copy_textures, filepath)) return generate_materials(mtl, materials, draw_type) def handle_texture(id, textures, material, filepath, option_copy_textures): if textures[id]: texName = 'map%s' % id.capitalize() repeatName = 'map%sRepeat' % id.capitalize() wrapName = 'map%sWrap' % id.capitalize() slot = textures[id]['slot'] texture = textures[id]['texture'] image = texture.image fname = extract_texture_filename(image) material[texName] = fname if option_copy_textures: save_image(image, fname, filepath) if texture.repeat_x != 1 or texture.repeat_y != 1: material[repeatName] = [texture.repeat_x, texture.repeat_y] if texture.extension == "REPEAT": wrap_x = "repeat" wrap_y = "repeat" if texture.use_mirror_x: wrap_x = "mirror" if texture.use_mirror_y: wrap_y = "mirror" material[wrapName] = [wrap_x, wrap_y] if slot.use_map_normal: if slot.normal_factor != 1.0: material['mapNormalFactor'] = slot.normal_factor # ##################################################### # ASCII model generator # ##################################################### def generate_ascii_model(meshes, morphs, scene, option_vertices, option_vertices_truncate, option_faces, option_normals, option_uv_coords, option_materials, option_colors, align_model, flipyz, option_scale, option_copy_textures, filepath, option_animation, option_frame_step): vertices = [] vertex_offset = 0 vertex_offsets = [] nnormal = 0 normals = {} ncolor = 0 colors = {} nuv = 0 uvs = {} nmaterial = 0 materials = [] for mesh, object in meshes: faceUV = (len(mesh.uv_textures) > 0) vertexUV = (len(mesh.sticky) > 0) vertexColors = len(mesh.vertex_colors) > 0 mesh_extract_colors = option_colors and vertexColors mesh_extract_uvs = option_uv_coords and (faceUV or vertexUV) if faceUV or vertexUV: active_uv_layer = mesh.uv_textures.active if not active_uv_layer: mesh_extract_uvs = False if vertexColors: active_col_layer = mesh.vertex_colors.active if not active_col_layer: mesh_extract_colors = False vertex_offsets.append(vertex_offset) vertex_offset += len(vertices) vertices.extend(mesh.vertices[:]) if option_normals: nnormal = extract_vertex_normals(mesh, normals, nnormal) if mesh_extract_colors: ncolor = extract_vertex_colors(mesh, colors, ncolor) if mesh_extract_uvs: nuv = extract_uvs(mesh, uvs, nuv) if option_materials: mesh_materials, nmaterial = generate_materials_string(mesh, scene, mesh_extract_colors, object.draw_type, option_copy_textures, filepath, nmaterial) materials.append(mesh_materials) morphTargets_string = "" nmorphTarget = 0 if option_animation: chunks = [] for i, morphVertices in 
enumerate(morphs): morphTarget = '{ "name": "%s_%06d", "vertices": [%s] }' % ("animation", i, morphVertices) chunks.append(morphTarget) morphTargets_string = ",\n\t".join(chunks) nmorphTarget = len(morphs) if align_model == 1: center(vertices) elif align_model == 2: bottom(vertices) elif align_model == 3: top(vertices) faces_string, nfaces = generate_faces(normals, uvs, colors, meshes, option_normals, option_colors, option_uv_coords, option_materials, flipyz, option_faces) materials_string = ",\n\n".join(materials) model_string = TEMPLATE_MODEL_ASCII % { "scale" : option_scale, "uvs" : generate_uvs(uvs, option_uv_coords), "normals" : generate_normals(normals, option_normals), "colors" : generate_vertex_colors(colors, option_colors), "materials" : materials_string, "vertices" : generate_vertices(vertices, option_vertices_truncate, option_vertices), "faces" : faces_string, "morphTargets" : morphTargets_string } text = TEMPLATE_FILE_ASCII % { "nvertex" : len(vertices), "nface" : nfaces, "nuv" : nuv, "nnormal" : nnormal, "ncolor" : ncolor, "nmaterial" : nmaterial, "nmorphTarget": nmorphTarget, "model" : model_string } return text, model_string # ##################################################### # Model exporter - export single mesh # ##################################################### def extract_meshes(objects, scene, export_single_model, option_scale): meshes = [] for object in objects: if object.type == "MESH" and object.THREE_exportGeometry: # collapse modifiers into mesh mesh = object.to_mesh(scene, True, 'RENDER') if not mesh: raise Exception("Error, could not get mesh data from object [%s]" % object.name) # that's what Blender's native export_obj.py does # to flip YZ if export_single_model: X_ROT = mathutils.Matrix.Rotation(-math.pi/2, 4, 'X') mesh.transform(X_ROT * object.matrix_world) mesh.calc_normals() mesh.transform(mathutils.Matrix.Scale(option_scale, 4)) meshes.append([mesh, object]) return meshes def generate_mesh_string(objects, scene, option_vertices, option_vertices_truncate, option_faces, option_normals, option_uv_coords, option_materials, option_colors, align_model, flipyz, option_scale, export_single_model, option_copy_textures, filepath, option_animation, option_frame_step): meshes = extract_meshes(objects, scene, export_single_model, option_scale) morphs = [] if option_animation: original_frame = scene.frame_current # save animation state scene_frames = range(scene.frame_start, scene.frame_end + 1, option_frame_step) for frame in scene_frames: scene.frame_set(frame, 0.0) anim_meshes = extract_meshes(objects, scene, export_single_model, option_scale) frame_vertices = [] for mesh, object in anim_meshes: frame_vertices.extend(mesh.vertices[:]) morphVertices = generate_vertices(frame_vertices, option_vertices_truncate, option_vertices) morphs.append(morphVertices) # remove temp meshes for mesh, object in anim_meshes: bpy.data.meshes.remove(mesh) scene.frame_set(original_frame, 0.0) # restore animation state text, model_string = generate_ascii_model(meshes, morphs, scene, option_vertices, option_vertices_truncate, option_faces, option_normals, option_uv_coords, option_materials, option_colors, align_model, flipyz, option_scale, option_copy_textures, filepath, option_animation, option_frame_step) # remove temp meshes for mesh, object in meshes: bpy.data.meshes.remove(mesh) return text, model_string def export_mesh(objects, scene, filepath, option_vertices, option_vertices_truncate, option_faces, option_normals, option_uv_coords, option_materials, option_colors, 
align_model, flipyz, option_scale, export_single_model, option_copy_textures, option_animation, option_frame_step): """Export single mesh""" text, model_string = generate_mesh_string(objects, scene, option_vertices, option_vertices_truncate, option_faces, option_normals, option_uv_coords, option_materials, option_colors, align_model, flipyz, option_scale, export_single_model, option_copy_textures, filepath, option_animation, option_frame_step) write_file(filepath, text) print("writing", filepath, "done") # ##################################################### # Scene exporter - render elements # ##################################################### def generate_vec4(vec): return TEMPLATE_VEC4 % (vec[0], vec[1], vec[2], vec[3]) def generate_vec3(vec): return TEMPLATE_VEC3 % (vec[0], vec[1], vec[2]) def generate_vec2(vec): return TEMPLATE_VEC2 % (vec[0], vec[1]) def generate_hex(number): return TEMPLATE_HEX % number def generate_string(s): return TEMPLATE_STRING % s def generate_string_list(src_list): return ", ".join(generate_string(item) for item in src_list) def generate_section(label, content): return TEMPLATE_SECTION % (label, content) def get_mesh_filename(mesh): object_id = mesh["data"]["name"] filename = "%s.js" % sanitize(object_id) return filename def generate_material_id_list(materials): chunks = [] for material in materials: chunks.append(material.name) return chunks def generate_group_id_list(obj): chunks = [] for group in bpy.data.groups: if obj.name in group.objects: chunks.append(group.name) return chunks def generate_bool_property(property): if property: return "true" return "false" # ##################################################### # Scene exporter - objects # ##################################################### def generate_objects(data): chunks = [] for obj in data["objects"]: if obj.type == "MESH" and obj.THREE_exportGeometry: object_id = obj.name if len(obj.modifiers) > 0: geo_name = obj.name else: geo_name = obj.data.name geometry_id = "geo_%s" % geo_name material_ids = generate_material_id_list(obj.material_slots) group_ids = generate_group_id_list(obj) position, quaternion, scale = obj.matrix_world.decompose() rotation = quaternion.to_euler("XYZ") material_string = "" if len(material_ids) > 0: material_string = generate_string_list(material_ids) group_string = "" if len(group_ids) > 0: group_string = generate_string_list(group_ids) castsShadow = obj.THREE_castsShadow meshCollider = obj.THREE_meshCollider triggerType = obj.THREE_triggerType visible = True #if obj.draw_type in ["BOUNDS", "WIRE"] and (meshCollider or castsShadow): if meshCollider or castsShadow: visible = False geometry_string = generate_string(geometry_id) object_string = TEMPLATE_OBJECT % { "object_id" : generate_string(object_id), "geometry_id" : geometry_string, "group_id" : group_string, "material_id" : material_string, "position" : generate_vec3(position), "rotation" : generate_vec3(rotation), "quaternion" : generate_vec4(quaternion), "scale" : generate_vec3(scale), "castsShadow" : generate_bool_property(castsShadow), "meshCollider" : generate_bool_property(meshCollider), "trigger" : generate_string(triggerType), "visible" : generate_bool_property(visible) } chunks.append(object_string) elif obj.type == "EMPTY" or (obj.type == "MESH" and not obj.THREE_exportGeometry): object_id = obj.name group_ids = generate_group_id_list(obj) position, quaternion, scale = obj.matrix_world.decompose() rotation = quaternion.to_euler("XYZ") group_string = "" if len(group_ids) > 0: group_string = 
generate_string_list(group_ids) triggerType = obj.THREE_triggerType object_string = TEMPLATE_EMPTY % { "object_id" : generate_string(object_id), "group_id" : group_string, "position" : generate_vec3(position), "rotation" : generate_vec3(rotation), "quaternion" : generate_vec4(quaternion), "scale" : generate_vec3(scale), "trigger" : generate_string(triggerType), } chunks.append(object_string) return ",\n\n".join(chunks), len(chunks) # ##################################################### # Scene exporter - geometries # ##################################################### def generate_geometries(data): chunks = [] geo_set = set() for obj in data["objects"]: if obj.type == "MESH" and obj.THREE_exportGeometry: if len(obj.modifiers) > 0: name = obj.name else: name = obj.data.name if name not in geo_set: geometry_id = "geo_%s" % name if data["embed_meshes"]: embed_id = "emb_%s" % name geometry_string = TEMPLATE_GEOMETRY_EMBED % { "geometry_id" : generate_string(geometry_id), "embed_id" : generate_string(embed_id) } else: model_filename = os.path.basename(generate_mesh_filename(name, data["filepath"])) geometry_string = TEMPLATE_GEOMETRY_LINK % { "geometry_id" : generate_string(geometry_id), "model_file" : generate_string(model_filename) } chunks.append(geometry_string) geo_set.add(name) return ",\n\n".join(chunks), len(chunks) # ##################################################### # Scene exporter - textures # ##################################################### def generate_textures_scene(data): chunks = [] # TODO: extract just textures actually used by some objects in the scene for texture in bpy.data.textures: if texture.type == 'IMAGE' and texture.image: img = texture.image texture_id = img.name texture_file = extract_texture_filename(img) if data["copy_textures"]: save_image(img, texture_file, data["filepath"]) extras = "" if texture.repeat_x != 1 or texture.repeat_y != 1: extras += ',\n "repeat": [%f, %f]' % (texture.repeat_x, texture.repeat_y) if texture.extension == "REPEAT": wrap_x = "repeat" wrap_y = "repeat" if texture.use_mirror_x: wrap_x = "mirror" if texture.use_mirror_y: wrap_y = "mirror" extras += ',\n "wrap": ["%s", "%s"]' % (wrap_x, wrap_y) texture_string = TEMPLATE_TEXTURE % { "texture_id" : generate_string(texture_id), "texture_file" : generate_string(texture_file), "extras" : extras } chunks.append(texture_string) return ",\n\n".join(chunks), len(chunks) def extract_texture_filename(image): fn = bpy.path.abspath(image.filepath) fn = os.path.normpath(fn) fn_strip = os.path.basename(fn) return fn_strip def save_image(img, name, fpath): dst_dir = os.path.dirname(fpath) dst_path = os.path.join(dst_dir, name) ensure_folder_exist(dst_dir) if img.packed_file: img.save_render(dst_path) else: src_path = bpy.path.abspath(img.filepath) shutil.copy(src_path, dst_dir) # ##################################################### # Scene exporter - materials # ##################################################### def extract_material_data(m, option_colors): world = bpy.context.scene.world material = { 'name': m.name } material['colorDiffuse'] = [m.diffuse_intensity * m.diffuse_color[0], m.diffuse_intensity * m.diffuse_color[1], m.diffuse_intensity * m.diffuse_color[2]] material['colorSpecular'] = [m.specular_intensity * m.specular_color[0], m.specular_intensity * m.specular_color[1], m.specular_intensity * m.specular_color[2]] world_ambient_color = [0, 0, 0] if world: world_ambient_color = world.ambient_color material['colorAmbient'] = [m.ambient * world_ambient_color[0], m.ambient * 
world_ambient_color[1], m.ambient * world_ambient_color[2]] material['transparency'] = m.alpha # not sure about mapping values to Blinn-Phong shader # Blender uses INT from [1,511] with default 0 # http://www.blender.org/documentation/blender_python_api_2_54_0/bpy.types.Material.html#bpy.types.Material.specular_hardness material["specularCoef"] = m.specular_hardness material["vertexColors"] = m.THREE_useVertexColors and option_colors material['mapDiffuse'] = "" material['mapLight'] = "" material['mapSpecular'] = "" material['mapNormal'] = "" material['mapNormalFactor'] = 1.0 textures = guess_material_textures(m) if textures['diffuse']: material['mapDiffuse'] = textures['diffuse']['texture'].image.name if textures['light']: material['mapLight'] = textures['light']['texture'].image.name if textures['specular']: material['mapSpecular'] = textures['specular']['texture'].image.name if textures['normal']: material['mapNormal'] = textures['normal']['texture'].image.name if textures['normal']['slot'].use_map_normal: material['mapNormalFactor'] = textures['normal']['slot'].normal_factor material['shading'] = m.THREE_materialType return material def guess_material_textures(material): textures = { 'diffuse' : None, 'light' : None, 'normal' : None, 'specular': None } # just take first textures of each, for the moment three.js materials can't handle more # assume diffuse comes before lightmap, normalmap has checked flag for i in range(len(material.texture_slots)): slot = material.texture_slots[i] if slot: texture = slot.texture if slot.use and texture.type == 'IMAGE': if texture.use_normal_map: textures['normal'] = { "texture": texture, "slot": slot } elif slot.use_map_specular or slot.use_map_hardness: textures['specular'] = { "texture": texture, "slot": slot } else: if not textures['diffuse']: textures['diffuse'] = { "texture": texture, "slot": slot } else: textures['light'] = { "texture": texture, "slot": slot } if textures['diffuse'] and textures['normal'] and textures['light'] and textures['specular']: break return textures def generate_material_string(material): material_id = material["name"] # default to Lambert shading = material.get("shading", "Lambert") # normal mapped materials must use Phong # to get all required parameters for normal shader if material['mapNormal']: shading = "Phong" type_map = { "Lambert" : "MeshLambertMaterial", "Phong" : "MeshPhongMaterial" } material_type = type_map.get(shading, "MeshBasicMaterial") parameters = '"color": %d' % rgb2int(material["colorDiffuse"]) parameters += ', "opacity": %.2g' % material["transparency"] if shading == "Phong": parameters += ', "ambient": %d' % rgb2int(material["colorAmbient"]) parameters += ', "specular": %d' % rgb2int(material["colorSpecular"]) parameters += ', "shininess": %.1g' % material["specularCoef"] colorMap = material['mapDiffuse'] lightMap = material['mapLight'] specularMap = material['mapSpecular'] normalMap = material['mapNormal'] normalMapFactor = material['mapNormalFactor'] if colorMap: parameters += ', "map": %s' % generate_string(colorMap) if lightMap: parameters += ', "lightMap": %s' % generate_string(lightMap) if specularMap: parameters += ', "specularMap": %s' % generate_string(specularMap) if normalMap: parameters += ', "normalMap": %s' % generate_string(normalMap) if normalMapFactor != 1.0: parameters += ', "normalMapFactor": %f' % normalMapFactor if material['vertexColors']: parameters += ', "vertexColors": "vertex"' material_string = TEMPLATE_MATERIAL_SCENE % { "material_id" : generate_string(material_id), "type" 
: generate_string(material_type), "parameters" : parameters } return material_string def generate_materials_scene(data): chunks = [] # TODO: extract just materials actually used by some objects in the scene for m in bpy.data.materials: material = extract_material_data(m, data["use_colors"]) material_string = generate_material_string(material) chunks.append(material_string) return ",\n\n".join(chunks), len(chunks) # ##################################################### # Scene exporter - cameras # ##################################################### def generate_cameras(data): if data["use_cameras"]: cams = bpy.data.objects cams = [ob for ob in cams if (ob.type == 'CAMERA' and ob.select)] chunks = [] if not cams: camera = DEFAULTS["camera"] if camera["type"] == "perspective": camera_string = TEMPLATE_CAMERA_PERSPECTIVE % { "camera_id" : generate_string(camera["name"]), "fov" : camera["fov"], "aspect" : camera["aspect"], "near" : camera["near"], "far" : camera["far"], "position" : generate_vec3(camera["position"]), "target" : generate_vec3(camera["target"]) } elif camera["type"] == "ortho": camera_string = TEMPLATE_CAMERA_ORTHO % { "camera_id" : generate_string(camera["name"]), "left" : camera["left"], "right" : camera["right"], "top" : camera["top"], "bottom" : camera["bottom"], "near" : camera["near"], "far" : camera["far"], "position" : generate_vec3(camera["position"]), "target" : generate_vec3(camera["target"]) } chunks.append(camera_string) else: for cameraobj in cams: camera = bpy.data.cameras[cameraobj.name] # TODO: # Support more than perspective camera # Calculate a target/lookat # Get correct aspect ratio if camera.id_data.type == "PERSP": camera_string = TEMPLATE_CAMERA_PERSPECTIVE % { "camera_id" : generate_string(camera.name), "fov" : (camera.angle / 3.14) * 180.0, "aspect" : 1.333, "near" : camera.clip_start, "far" : camera.clip_end, "position" : generate_vec3([cameraobj.location[0], -cameraobj.location[1], cameraobj.location[2]]), "target" : generate_vec3([0, 0, 0]) } chunks.append(camera_string) return ",\n\n".join(chunks) return "" # ##################################################### # Scene exporter - lights # ##################################################### def generate_lights(data): if data["use_lights"]: lights = data.get("lights", []) if not lights: lights.append(DEFAULTS["light"]) chunks = [] for light in lights: if light["type"] == "directional": light_string = TEMPLATE_LIGHT_DIRECTIONAL % { "light_id" : generate_string(light["name"]), "direction" : generate_vec3(light["direction"]), "color" : rgb2int(light["color"]), "intensity" : light["intensity"] } elif light["type"] == "point": light_string = TEMPLATE_LIGHT_POINT % { "light_id" : generate_string(light["name"]), "position" : generate_vec3(light["position"]), "color" : rgb2int(light["color"]), "intensity" : light["intensity"] } chunks.append(light_string) return ",\n\n".join(chunks) return "" # ##################################################### # Scene exporter - embedded meshes # ##################################################### def generate_embeds(data): if data["embed_meshes"]: chunks = [] for e in data["embeds"]: embed = '"emb_%s": {%s}' % (e, data["embeds"][e]) chunks.append(embed) return ",\n\n".join(chunks) return "" # ##################################################### # Scene exporter - generate ASCII scene # ##################################################### def generate_ascii_scene(data): objects, nobjects = generate_objects(data) geometries, ngeometries = generate_geometries(data) 
textures, ntextures = generate_textures_scene(data) materials, nmaterials = generate_materials_scene(data) cameras = generate_cameras(data) lights = generate_lights(data) embeds = generate_embeds(data) basetype = "relativeTo" if data["base_html"]: basetype += "HTML" else: basetype += "Scene" sections = [ ["objects", objects], ["geometries", geometries], ["textures", textures], ["materials", materials], ["cameras", cameras], ["lights", lights], ["embeds", embeds] ] chunks = [] for label, content in sections: if content: chunks.append(generate_section(label, content)) sections_string = "\n".join(chunks) default_camera = "" if data["use_cameras"]: cams = [ob for ob in bpy.data.objects if (ob.type == 'CAMERA' and ob.select)] if not cams: default_camera = "default_camera" else: default_camera = cams[0].name parameters = { "fname" : data["source_file"], "sections" : sections_string, "bgcolor" : generate_vec3(DEFAULTS["bgcolor"]), "bgalpha" : DEFAULTS["bgalpha"], "defcamera" : generate_string(default_camera), "nobjects" : nobjects, "ngeometries" : ngeometries, "ntextures" : ntextures, "basetype" : generate_string(basetype), "nmaterials" : nmaterials, "position" : generate_vec3(DEFAULTS["position"]), "rotation" : generate_vec3(DEFAULTS["rotation"]), "scale" : generate_vec3(DEFAULTS["scale"]) } text = TEMPLATE_SCENE_ASCII % parameters return text def export_scene(scene, filepath, flipyz, option_colors, option_lights, option_cameras, option_embed_meshes, embeds, option_url_base_html, option_copy_textures): source_file = os.path.basename(bpy.data.filepath) scene_text = "" data = { "scene" : scene, "objects" : scene.objects, "embeds" : embeds, "source_file" : source_file, "filepath" : filepath, "flipyz" : flipyz, "use_colors" : option_colors, "use_lights" : option_lights, "use_cameras" : option_cameras, "embed_meshes" : option_embed_meshes, "base_html" : option_url_base_html, "copy_textures": option_copy_textures } scene_text += generate_ascii_scene(data) write_file(filepath, scene_text) # ##################################################### # Main # ##################################################### def save(operator, context, filepath = "", option_flip_yz = True, option_vertices = True, option_vertices_truncate = False, option_faces = True, option_normals = True, option_uv_coords = True, option_materials = True, option_colors = True, align_model = 0, option_export_scene = False, option_lights = False, option_cameras = False, option_scale = 1.0, option_embed_meshes = True, option_url_base_html = False, option_copy_textures = False, option_animation = False, option_frame_step = 1, option_all_meshes = True): #print("URL TYPE", option_url_base_html) filepath = ensure_extension(filepath, '.js') scene = context.scene if scene.objects.active: bpy.ops.object.mode_set(mode='OBJECT') if option_all_meshes: objects = scene.objects else: objects = context.selected_objects if option_export_scene: geo_set = set() embeds = {} for object in objects: if object.type == "MESH" and object.THREE_exportGeometry: # create extra copy of geometry with applied modifiers # (if they exist) if len(object.modifiers) > 0: name = object.name # otherwise can share geometry else: name = object.data.name if name not in geo_set: if option_embed_meshes: text, model_string = generate_mesh_string([object], scene, option_vertices, option_vertices_truncate, option_faces, option_normals, option_uv_coords, option_materials, option_colors, False, # align_model option_flip_yz, option_scale, False, # export_single_model False, # 
option_copy_textures filepath, option_animation, option_frame_step) embeds[name] = model_string else: fname = generate_mesh_filename(name, filepath) export_mesh([object], scene, fname, option_vertices, option_vertices_truncate, option_faces, option_normals, option_uv_coords, option_materials, option_colors, False, # align_model option_flip_yz, option_scale, False, # export_single_model option_copy_textures, option_animation, option_frame_step) geo_set.add(name) export_scene(scene, filepath, option_flip_yz, option_colors, option_lights, option_cameras, option_embed_meshes, embeds, option_url_base_html, option_copy_textures) else: export_mesh(objects, scene, filepath, option_vertices, option_vertices_truncate, option_faces, option_normals, option_uv_coords, option_materials, option_colors, align_model, option_flip_yz, option_scale, True, # export_single_model option_copy_textures, option_animation, option_frame_step) return {'FINISHED'}
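# #####################################################
# Illustrative sketch (not part of the exporter above):
# decode the leading faceType bitmask that generate_face()
# packs via setBit(). Bit positions mirror the setBit()
# calls in generate_face(); the helper name is an assumption.
# #####################################################

def decode_face_type(face_type):
    """Return the per-face flags packed into the faceType integer."""
    flag_names = (
        "isQuad",                # bit 0: 0 = triangle, 1 = quad
        "hasMaterial",           # bit 1
        "hasFaceUvs",            # bit 2 (never set by this exporter)
        "hasFaceVertexUvs",      # bit 3
        "hasFaceNormals",        # bit 4 (never set by this exporter)
        "hasFaceVertexNormals",  # bit 5
        "hasFaceColors",         # bit 6 (never set by this exporter)
        "hasFaceVertexColors",   # bit 7
    )
    return dict((name, bool(face_type & (1 << bit)))
                for bit, name in enumerate(flag_names))

# Example: a quad with a material index and per-vertex normals encodes
# bits 0, 1 and 5, i.e. faceType 0b00100011 == 35.
assert decode_face_type(35)["isQuad"]
assert decode_face_type(35)["hasMaterial"]
assert decode_face_type(35)["hasFaceVertexNormals"]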
from toontown.toonbase.ToontownBattleGlobals import * import types from direct.fsm import StateData from direct.fsm import ClassicFSM, State from direct.fsm import State import TownBattleAttackPanelNEW import TownBattleAttackPanelOLD import TownBattleWaitPanel import TownBattleChooseAvatarPanel import TownBattleSOSPanelNEW import TownBattleSOSPetSearchPanel import TownBattleSOSPetInfoPanel import TownBattleToonPanel import TownBattleCogPanel from toontown.toontowngui import TTDialog from direct.directnotify import DirectNotifyGlobal from toontown.battle import BattleBase from toontown.toonbase import ToontownTimer from direct.showbase import PythonUtil from toontown.toonbase import TTLocalizer from toontown.pets import PetConstants from direct.gui.DirectGui import DGG from toontown.battle import FireCogPanel class TownBattleNEW(StateData.StateData): notify = DirectNotifyGlobal.directNotify.newCategory('TownBattle') evenPos = (0.75, 0.25, -0.25, -0.75) oddPos = (0.5, 0, -0.5) def __init__(self, doneEvent): StateData.StateData.__init__(self, doneEvent) self.numCogs = 1 self.cogs = [] self.creditLevel = None self.luredIndices = [] self.trappedIndices = [] self.numToons = 1 self.toons = [] self.localNum = 0 self.time = 0 self.bldg = 0 self.track = -1 self.level = -1 self.target = 0 self.toonAttacks = [(-1, 0, 0), (-1, 0, 0), (-1, 0, 0), (-1, 0, 0)] self.fsm = ClassicFSM.ClassicFSM('TownBattle', [State.State('Off', self.enterOff, self.exitOff, ['Attack']), State.State('Attack', self.enterAttack, self.exitAttack, ['ChooseCog', 'ChooseToon', 'AttackWait', 'Run', 'Fire', 'SOS']), State.State('ChooseCog', self.enterChooseCog, self.exitChooseCog, ['AttackWait', 'Attack']), State.State('AttackWait', self.enterAttackWait, self.exitAttackWait, ['ChooseCog', 'ChooseToon', 'Attack']), State.State('ChooseToon', self.enterChooseToon, self.exitChooseToon, ['AttackWait', 'Attack']), State.State('Run', self.enterRun, self.exitRun, ['Attack']), State.State('SOS', self.enterSOS, self.exitSOS, ['Attack', 'AttackWait', 'SOSPetSearch', 'SOSPetInfo']), State.State('SOSPetSearch', self.enterSOSPetSearch, self.exitSOSPetSearch, ['SOS', 'SOSPetInfo']), State.State('SOSPetInfo', self.enterSOSPetInfo, self.exitSOSPetInfo, ['SOS', 'AttackWait']), State.State('Fire', self.enterFire, self.exitFire, ['Attack', 'AttackWait'])], 'Off', 'Off') self.runPanel = TTDialog.TTDialog(dialogName='TownBattleRunPanel', text=TTLocalizer.TownBattleRun, style=TTDialog.TwoChoice, command=self.__handleRunPanelDone) self.runPanel.hide() self.attackPanelDoneEvent = 'attack-panel-done' if settings['newGui'] == True: self.attackPanel = TownBattleAttackPanelNEW.TownBattleAttackPanelNEW(self.attackPanelDoneEvent) else: self.attackPanel = TownBattleAttackPanelOLD.TownBattleAttackPanelOLD(self.attackPanelDoneEvent) self.waitPanelDoneEvent = 'wait-panel-done' self.waitPanel = TownBattleWaitPanel.TownBattleWaitPanel(self.waitPanelDoneEvent) self.chooseCogPanelDoneEvent = 'choose-cog-panel-done' self.chooseCogPanel = TownBattleChooseAvatarPanel.TownBattleChooseAvatarPanel(self.chooseCogPanelDoneEvent, 0) self.chooseToonPanelDoneEvent = 'choose-toon-panel-done' self.chooseToonPanel = TownBattleChooseAvatarPanel.TownBattleChooseAvatarPanel(self.chooseToonPanelDoneEvent, 1) self.SOSPanelDoneEvent = 'SOS-panel-done' self.SOSPanel = TownBattleSOSPanelNEW.TownBattleSOSPanelNEW(self.SOSPanelDoneEvent) self.SOSPetSearchPanelDoneEvent = 'SOSPetSearch-panel-done' self.SOSPetSearchPanel = 
TownBattleSOSPetSearchPanel.TownBattleSOSPetSearchPanel(self.SOSPetSearchPanelDoneEvent) self.SOSPetInfoPanelDoneEvent = 'SOSPetInfo-panel-done' self.SOSPetInfoPanel = TownBattleSOSPetInfoPanel.TownBattleSOSPetInfoPanel(self.SOSPetInfoPanelDoneEvent) self.fireCogPanelDoneEvent = 'fire-cog-panel-done' self.FireCogPanel = FireCogPanel.FireCogPanel(self.fireCogPanelDoneEvent) self.cogFireCosts = [None, None, None, None] self.toonPanels = (TownBattleToonPanel.TownBattleToonPanel(0), TownBattleToonPanel.TownBattleToonPanel(1), TownBattleToonPanel.TownBattleToonPanel(2), TownBattleToonPanel.TownBattleToonPanel(3)) self.cogPanels = (TownBattleCogPanel.TownBattleCogPanel(0), TownBattleCogPanel.TownBattleCogPanel(1), TownBattleCogPanel.TownBattleCogPanel(2), TownBattleCogPanel.TownBattleCogPanel(3)) self.timer = ToontownTimer.ToontownTimer() self.timer.reparentTo(base.a2dTopRight) self.timer.setPos(-0.151, 0, -0.158) self.timer.setScale(0.4) self.timer.hide() return def cleanup(self): self.ignore(self.attackPanelDoneEvent) self.unload() del self.fsm self.runPanel.cleanup() del self.runPanel del self.attackPanel del self.waitPanel del self.chooseCogPanel del self.chooseToonPanel del self.SOSPanel del self.FireCogPanel del self.SOSPetSearchPanel del self.SOSPetInfoPanel for toonPanel in self.toonPanels: toonPanel.cleanup() del self.toonPanels for cogPanel in self.cogPanels: cogPanel.cleanup() del self.cogPanels self.timer.destroy() del self.timer del self.cogs del self.toons def enter(self, event, parentFSMState, bldg = 0, creditMultiplier = 1, tutorialFlag = 0): self.parentFSMState = parentFSMState self.parentFSMState.addChild(self.fsm) if not self.isLoaded: self.load() print 'Battle Event %s' % event self.battleEvent = event self.fsm.enterInitialState() base.localAvatar.laffMeter.start() self.numToons = 1 self.numCogs = 1 self.toons = [base.localAvatar.doId] self.toonPanels[0].setLaffMeter(base.localAvatar) self.bldg = bldg self.creditLevel = None self.creditMultiplier = creditMultiplier self.tutorialFlag = tutorialFlag base.localAvatar.inventory.setBattleCreditMultiplier(self.creditMultiplier) base.localAvatar.inventory.setActivateMode('battle', heal=0, bldg=bldg, tutorialFlag=tutorialFlag) self.SOSPanel.bldg = bldg return def exit(self): base.localAvatar.laffMeter.stop() self.parentFSMState.removeChild(self.fsm) del self.parentFSMState base.localAvatar.inventory.setBattleCreditMultiplier(1) def load(self): if self.isLoaded: return self.attackPanel.load() self.waitPanel.load() self.chooseCogPanel.load() self.chooseToonPanel.load() self.SOSPanel.load() if hasattr(base, 'wantPets') and base.wantPets: self.SOSPetSearchPanel.load() self.SOSPetInfoPanel.load() self.isLoaded = 1 def unload(self): if not self.isLoaded: return self.attackPanel.unload() self.waitPanel.unload() self.chooseCogPanel.unload() self.chooseToonPanel.unload() self.FireCogPanel.unload() self.SOSPanel.unload() if hasattr(base, 'wantPets') and base.wantPets: self.SOSPetSearchPanel.unload() self.SOSPetInfoPanel.unload() self.isLoaded = 0 def setState(self, state): if hasattr(self, 'fsm'): self.fsm.request(state) def updateTimer(self, time): self.time = time self.timer.setTime(time) return None def __cogPanels(self, num): for panel in self.cogPanels: panel.hide() panel.setPos(0, 0, 0.615) if num == 1: self.cogPanels[0].setX(self.oddPos[1]) self.cogPanels[0].show() elif num == 2: for i in xrange(2): self.cogPanels[i].setX(self.evenPos[i + 1]) self.cogPanels[i].show() elif num == 3: for i in xrange(3): 
self.cogPanels[i].setX(self.oddPos[i]) self.cogPanels[i].show() elif num == 4: for i in xrange(4): self.cogPanels[i].setX(self.evenPos[i]) self.cogPanels[i].show() def __enterPanels(self, num, localNum): self.notify.debug('enterPanels() num: %d localNum: %d' % (num, localNum)) for toonPanel in self.toonPanels: toonPanel.hide() toonPanel.setPos(0, 0, -0.3) if num == 1: self.toonPanels[0].setX(self.oddPos[1]) self.toonPanels[0].show() elif num == 2: self.toonPanels[0].setX(self.evenPos[1]) self.toonPanels[0].show() self.toonPanels[1].setX(self.evenPos[2]) self.toonPanels[1].show() elif num == 3: self.toonPanels[0].setX(self.oddPos[0]) self.toonPanels[0].show() self.toonPanels[1].setX(self.oddPos[1]) self.toonPanels[1].show() self.toonPanels[2].setX(self.oddPos[2]) self.toonPanels[2].show() elif num == 4: self.toonPanels[0].setX(self.evenPos[0]) self.toonPanels[0].show() self.toonPanels[1].setX(self.evenPos[1]) self.toonPanels[1].show() self.toonPanels[2].setX(self.evenPos[2]) self.toonPanels[2].show() self.toonPanels[3].setX(self.evenPos[3]) self.toonPanels[3].show() else: self.notify.error('Bad number of toons: %s' % num) return None def updateChosenAttacks(self, battleIndices, tracks, levels, targets): self.notify.debug('updateChosenAttacks bi=%s tracks=%s levels=%s targets=%s' % (battleIndices, tracks, levels, targets)) for i in range(4): if battleIndices[i] == -1: pass else: if tracks[i] == BattleBase.NO_ATTACK: numTargets = 0 target = -2 elif tracks[i] == BattleBase.PASS_ATTACK: numTargets = 0 target = -2 elif tracks[i] == BattleBase.SOS or tracks[i] == BattleBase.NPCSOS or tracks[i] == BattleBase.PETSOS: numTargets = 0 target = -2 elif tracks[i] == HEAL_TRACK: numTargets = self.numToons if self.__isGroupHeal(levels[i]): target = -2 else: target = targets[i] else: numTargets = self.numCogs if self.__isGroupAttack(tracks[i], levels[i]): target = -1 else: target = targets[i] if target == -1: numTargets = None self.toonPanels[battleIndices[i]].setValues(battleIndices[i], tracks[i], levels[i], numTargets, target, self.localNum) return def chooseDefaultTarget(self): if self.track > -1: response = {} response['mode'] = 'Attack' response['track'] = self.track response['level'] = self.level response['target'] = self.target messenger.send(self.battleEvent, [response]) return 1 return 0 def updateLaffMeter(self, toonNum, hp): self.toonPanels[toonNum].updateLaffMeter(hp) def enterOff(self): if self.isLoaded: for toonPanel in self.toonPanels: toonPanel.hide() for cogPanel in self.cogPanels: cogPanel.hide() self.toonAttacks = [(-1, 0, 0), (-1, 0, 0), (-1, 0, 0), (-1, 0, 0)] self.target = 0 if hasattr(self, 'timer'): self.timer.hide() return None def exitOff(self): if self.isLoaded: self.__enterPanels(self.numToons, self.localNum) self.__cogPanels(self.numCogs) self.timer.show() self.track = -1 self.level = -1 self.target = 0 return None def enterAttack(self): self.attackPanel.enter() self.accept(self.attackPanelDoneEvent, self.__handleAttackPanelDone) return None def exitAttack(self): self.ignore(self.attackPanelDoneEvent) self.attackPanel.exit() return None def __handleAttackPanelDone(self, doneStatus): self.notify.debug('doneStatus: %s' % doneStatus) mode = doneStatus['mode'] if mode == 'Inventory': self.track = doneStatus['track'] self.level = doneStatus['level'] self.toonPanels[self.localNum].setValues(self.localNum, self.track, self.level) if self.track == HEAL_TRACK: if self.__isGroupHeal(self.level): response = {} response['mode'] = 'Attack' response['track'] = self.track response['level'] = 
self.level response['target'] = self.target messenger.send(self.battleEvent, [response]) self.fsm.request('AttackWait') elif self.numToons == 3 or self.numToons == 4: self.fsm.request('ChooseToon') elif self.numToons == 2: response = {} response['mode'] = 'Attack' response['track'] = self.track response['level'] = self.level if self.localNum == 0: response['target'] = 1 elif self.localNum == 1: response['target'] = 0 else: self.notify.error('Bad localNum value: %s' % self.localNum) messenger.send(self.battleEvent, [response]) self.fsm.request('AttackWait') else: self.notify.error('Heal was chosen when number of toons is %s' % self.numToons) elif self.__isCogChoiceNecessary(): self.notify.debug('choice needed') self.fsm.request('ChooseCog') response = {} response['mode'] = 'Attack' response['track'] = self.track response['level'] = self.level response['target'] = -1 messenger.send(self.battleEvent, [response]) else: self.notify.debug('no choice needed') self.fsm.request('AttackWait') response = {} response['mode'] = 'Attack' response['track'] = self.track response['level'] = self.level response['target'] = 0 messenger.send(self.battleEvent, [response]) elif mode == 'Run': self.fsm.request('Run') elif mode == 'SOS': self.fsm.request('SOS') elif mode == 'Fire': self.fsm.request('Fire') elif mode == 'Pass': response = {} response['mode'] = 'Pass' response['id'] = -1 messenger.send(self.battleEvent, [response]) self.fsm.request('AttackWait') else: self.notify.warning('unknown mode: %s' % mode) def checkHealTrapLure(self): self.notify.debug('numToons: %s, numCogs: %s, lured: %s, trapped: %s' % (self.numToons, self.numCogs, self.luredIndices, self.trappedIndices)) if len(PythonUtil.union(self.trappedIndices, self.luredIndices)) == self.numCogs: canTrap = 0 else: canTrap = 1 if len(self.luredIndices) == self.numCogs: canLure = 0 canTrap = 0 else: canLure = 1 if self.numToons == 1: canHeal = 0 else: canHeal = 1 return (canHeal, canTrap, canLure) def adjustCogsAndToons(self, cogs, luredIndices, trappedIndices, toons): cogIds = map(lambda cog: cog.doId, cogs) self.notify.debug('adjustCogsAndToons() cogIds: %s self.cogs: %s' % (cogIds, self.cogs)) self.notify.debug('adjustCogsAndToons() luredIndices: %s self.luredIndices: %s' % (luredIndices, self.luredIndices)) self.notify.debug('adjustCogsAndToons() trappedIndices: %s self.trappedIndices: %s' % (trappedIndices, self.trappedIndices)) toonIds = map(lambda toon: toon.doId, toons) self.notify.debug('adjustCogsAndToons() toonIds: %s self.toons: %s' % (toonIds, self.toons)) maxSuitLevel = 0 cogFireCostIndex = 0 for cog in cogs: maxSuitLevel = max(maxSuitLevel, cog.getActualLevel()) self.cogFireCosts[cogFireCostIndex] = 1 cogFireCostIndex += 1 creditLevel = maxSuitLevel if cogIds == self.cogs and creditLevel == self.creditLevel and luredIndices == self.luredIndices and trappedIndices == self.trappedIndices and toonIds == self.toons: resetActivateMode = 0 else: resetActivateMode = 1 self.notify.debug('adjustCogsAndToons() resetActivateMode: %s' % resetActivateMode) self.cogs = cogIds self.numCogs = len(cogs) self.creditLevel = creditLevel self.luredIndices = luredIndices self.trappedIndices = trappedIndices self.toons = toonIds self.numToons = len(toons) self.localNum = toons.index(base.localAvatar) currStateName = self.fsm.getCurrentState().getName() if resetActivateMode: self.__enterPanels(self.numToons, self.localNum) self.__cogPanels(self.numCogs) for i in range(len(toons)): self.toonPanels[i].setLaffMeter(toons[i]) for i in range(len(cogs)): 
self.cogPanels[i].setCogInformation(cogs[i]) if currStateName == 'ChooseCog': self.chooseCogPanel.adjustCogs(self.numCogs, self.luredIndices, self.trappedIndices, self.track) elif currStateName == 'ChooseToon': self.chooseToonPanel.adjustToons(self.numToons, self.localNum) canHeal, canTrap, canLure = self.checkHealTrapLure() base.localAvatar.inventory.setBattleCreditMultiplier(self.creditMultiplier) base.localAvatar.inventory.setActivateMode('battle', heal=canHeal, trap=canTrap, lure=canLure, bldg=self.bldg, creditLevel=self.creditLevel, tutorialFlag=self.tutorialFlag) def enterChooseCog(self): self.cog = 0 self.chooseCogPanel.enter(self.numCogs, luredIndices=self.luredIndices, trappedIndices=self.trappedIndices, track=self.track) self.accept(self.chooseCogPanelDoneEvent, self.__handleChooseCogPanelDone) return None def exitChooseCog(self): self.ignore(self.chooseCogPanelDoneEvent) self.chooseCogPanel.exit() return None def __handleChooseCogPanelDone(self, doneStatus): mode = doneStatus['mode'] if mode == 'Back': self.fsm.request('Attack') elif mode == 'Avatar': self.cog = doneStatus['avatar'] self.target = self.cog self.fsm.request('AttackWait') response = {} response['mode'] = 'Attack' response['track'] = self.track response['level'] = self.level response['target'] = self.cog messenger.send(self.battleEvent, [response]) else: self.notify.warning('unknown mode: %s' % mode) def enterAttackWait(self, chosenToon = -1): self.accept(self.waitPanelDoneEvent, self.__handleAttackWaitBack) self.waitPanel.enter(self.numToons) def exitAttackWait(self): self.waitPanel.exit() self.ignore(self.waitPanelDoneEvent) def __handleAttackWaitBack(self, doneStatus): mode = doneStatus['mode'] if mode == 'Back': if self.track == HEAL_TRACK: self.fsm.request('Attack') elif self.track == BattleBase.NO_ATTACK: self.fsm.request('Attack') elif self.__isCogChoiceNecessary(): self.fsm.request('ChooseCog') else: self.fsm.request('Attack') response = {} response['mode'] = 'UnAttack' messenger.send(self.battleEvent, [response]) else: self.notify.error('unknown mode: %s' % mode) def enterChooseToon(self): self.toon = 0 self.chooseToonPanel.enter(self.numToons, localNum=self.localNum) self.accept(self.chooseToonPanelDoneEvent, self.__handleChooseToonPanelDone) return None def exitChooseToon(self): self.ignore(self.chooseToonPanelDoneEvent) self.chooseToonPanel.exit() return None def __handleChooseToonPanelDone(self, doneStatus): mode = doneStatus['mode'] if mode == 'Back': self.fsm.request('Attack') elif mode == 'Avatar': self.toon = doneStatus['avatar'] self.target = self.toon self.fsm.request('AttackWait', [self.toon]) response = {} response['mode'] = 'Attack' response['track'] = self.track response['level'] = self.level response['target'] = self.toon messenger.send(self.battleEvent, [response]) else: self.notify.warning('unknown mode: %s' % mode) def enterRun(self): self.runPanel.show() def exitRun(self): self.runPanel.hide() def __handleRunPanelDone(self, doneStatus): if doneStatus == DGG.DIALOG_OK: response = {} response['mode'] = 'Run' messenger.send(self.battleEvent, [response]) else: self.fsm.request('Attack') def enterFire(self): canHeal, canTrap, canLure = self.checkHealTrapLure() self.FireCogPanel.enter(self.numCogs, luredIndices=self.luredIndices, trappedIndices=self.trappedIndices, track=self.track, fireCosts=self.cogFireCosts) self.accept(self.fireCogPanelDoneEvent, self.__handleCogFireDone) return None def exitFire(self): self.ignore(self.fireCogPanelDoneEvent) self.FireCogPanel.exit() return None def 
__handleCogFireDone(self, doneStatus): mode = doneStatus['mode'] if mode == 'Back': self.fsm.request('Attack') elif mode == 'Avatar': self.cog = doneStatus['avatar'] self.target = self.cog self.fsm.request('AttackWait') response = {} response['mode'] = 'Fire' response['target'] = self.cog messenger.send(self.battleEvent, [response]) else: self.notify.warning('unknown mode: %s' % mode) def enterSOS(self): canHeal, canTrap, canLure = self.checkHealTrapLure() self.SOSPanel.enter(canLure, canTrap) for panel in self.toonPanels: panel.stash() self.accept(self.SOSPanelDoneEvent, self.__handleSOSPanelDone) return None def exitSOS(self): self.ignore(self.SOSPanelDoneEvent) self.SOSPanel.exit() for panel in self.toonPanels: panel.unstash() return None def __handleSOSPanelDone(self, doneStatus): mode = doneStatus['mode'] if mode == 'Friend': doId = doneStatus['friend'] response = {} response['mode'] = 'SOS' response['id'] = doId messenger.send(self.battleEvent, [response]) self.fsm.request('AttackWait') elif mode == 'Pet': self.petId = doneStatus['petId'] self.petName = doneStatus['petName'] self.fsm.request('SOSPetSearch') elif mode == 'NPCFriend': doId = doneStatus['friend'] response = {} response['mode'] = 'NPCSOS' response['id'] = doId messenger.send(self.battleEvent, [response]) self.fsm.request('AttackWait') elif mode == 'Back': self.fsm.request('Attack') def enterSOSPetSearch(self): response = {} response['mode'] = 'PETSOSINFO' response['id'] = self.petId self.SOSPetSearchPanel.enter(self.petId, self.petName) self.proxyGenerateMessage = 'petProxy-%d-generated' % self.petId self.accept(self.proxyGenerateMessage, self.__handleProxyGenerated) self.accept(self.SOSPetSearchPanelDoneEvent, self.__handleSOSPetSearchPanelDone) messenger.send(self.battleEvent, [response]) return None def exitSOSPetSearch(self): self.ignore(self.proxyGenerateMessage) self.ignore(self.SOSPetSearchPanelDoneEvent) self.SOSPetSearchPanel.exit() return None def __handleSOSPetSearchPanelDone(self, doneStatus): mode = doneStatus['mode'] if mode == 'Back': self.fsm.request('SOS') else: self.notify.error('invalid mode in handleSOSPetSearchPanelDone') def __handleProxyGenerated(self): self.fsm.request('SOSPetInfo') def enterSOSPetInfo(self): self.SOSPetInfoPanel.enter(self.petId) self.accept(self.SOSPetInfoPanelDoneEvent, self.__handleSOSPetInfoPanelDone) return None def exitSOSPetInfo(self): self.ignore(self.SOSPetInfoPanelDoneEvent) self.SOSPetInfoPanel.exit() return None def __handleSOSPetInfoPanelDone(self, doneStatus): mode = doneStatus['mode'] if mode == 'OK': response = {} response['mode'] = 'PETSOS' response['id'] = self.petId response['trickId'] = doneStatus['trickId'] messenger.send(self.battleEvent, [response]) self.fsm.request('AttackWait') bboard.post(PetConstants.OurPetsMoodChangedKey, True) elif mode == 'Back': self.fsm.request('SOS') def __isCogChoiceNecessary(self): if self.numCogs > 1 and not self.__isGroupAttack(self.track, self.level): return 1 else: return 0 def __isGroupAttack(self, trackNum, levelNum): retval = BattleBase.attackAffectsGroup(trackNum, levelNum) return retval def __isGroupHeal(self, levelNum): retval = BattleBase.attackAffectsGroup(HEAL_TRACK, levelNum) return retval
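# --- Illustrative sketch, not part of the battle GUI above ---
# A minimal, standalone rendering of the availability rule implemented by
# checkHealTrapLure(): trap becomes unavailable once every cog is lured or
# trapped, lure (and with it trap) becomes unavailable once every cog is
# lured, and heal needs at least two toons. The function name and the plain
# int/list arguments are hypothetical; the real method reads the same data
# off the panel object itself.
def heal_trap_lure_availability(numToons, numCogs, luredIndices, trappedIndices):
    canTrap = 0 if len(set(trappedIndices) | set(luredIndices)) == numCogs else 1
    if len(set(luredIndices)) == numCogs:
        canLure = 0
        canTrap = 0
    else:
        canLure = 1
    canHeal = 0 if numToons == 1 else 1
    return (canHeal, canTrap, canLure)

# Example: two toons against two cogs that are both already lured leaves
# heal as the only special option.
assert heal_trap_lure_availability(2, 2, [0, 1], []) == (1, 0, 0)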
""" View functions for tag related things This includes both viewing the tag list, and manipulating tags on other things. """ from django.contrib.auth.decorators import login_required from django.core.exceptions import SuspiciousOperation from django.http import HttpResponse from django.shortcuts import get_object_or_404 from django.template import defaultfilters from django.template.response import TemplateResponse from django.views.decorators.http import require_POST from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger import six from tags.models import Tag from tags.forms import AddTagForm, RemoveTagForm, RenameTagForm, PinTagForm import tags from tags import makeslug from bookmarks.models import Bookmark from .templatetags.tag import tagBlock from bmat.utils import make_page import json @login_required def home(request): """ Uses index.html to display a list of all the user's tags """ ctx = {} ctx["area"] = "tags" ctx["tags"] = make_page(Tag.by_user(request.user), request.GET.get("p")) ctx["untag_count"] = Bookmark.by_user(request.user).filter(tags=None).count() return TemplateResponse(request, "tags/index.html", ctx) @login_required def filter(request, tag): """ Given a slug, uses filter.html to display all the things tagged with that specific tag """ tag = get_object_or_404(Tag, owner=request.user, slug=makeslug(tag)) ctx = {} if not tag.pinned: # Don't display "tags" thing as active when the tag is pinned ctx["area"] = "tags" ctx["tag"] = tag ctx["pin_form"] = PinTagForm(instance=tag) ctx["bookmarks"] = make_page(Bookmark.get_by_tag(tag), request.GET.get("p")) return TemplateResponse(request, "tags/filter.html", ctx) @login_required def untagged(request): """ Given a slug, uses filter.html to display all the things tagged with that specific tag """ ctx = {} bookmarks = Bookmark.by_user(request.user).filter(tags=None) ctx["untag_count"] = bookmarks.count() ctx["area"] = "tags" ctx["tag"] = None ctx["bookmarks"] = make_page(bookmarks, request.GET.get("p")) return TemplateResponse(request, "tags/filter.html", ctx) @login_required def suggest(request, value): """ Returns a JSON object containing tag suggestions for the sepecified value The JSON object contains two values: - yours: The string that was submitted to this page - tags: An array of strings for the suggestions """ tags = Tag.objects.filter(owner=request.user, slug__startswith=makeslug(value))[:10] return TemplateResponse(request, "tags/suggest.json", {"tags":tags, "value":value}, "application/json") @login_required @require_POST def tag(request): """ Tags a thing using an AddTagForm Returns a JSON response with an "obj" and "key" porperty. "obj" is the object that was tagged, while "type" is it's type. If there is an error, a JSON object with an "error" key is returned instead. 
""" f = AddTagForm(request.POST) if not f.is_valid(): return HttpResponse('{"error":"Form invalid"}', content_type="application/json", status=422) taggable = tags.lookup_taggable(f.cleaned_data["type"]) if taggable is None: return HttpResponse('{"error":"Taggable type invalid"}', content_type="application/json", status=422) try: obj = taggable.objects.get(owner=request.user, pk=f.cleaned_data["pk"]) except taggable.DoesNotExist: return HttpResponse('{"error":"Taggable not found"}', content_type="application/json", status=422) tag_names = map(lambda x: x.strip(), f.cleaned_data["name"].split(",")) for n in tag_names: if not n: continue try: tag = Tag.objects.get(owner=request.user, slug=makeslug(n)) except Tag.DoesNotExist: tag = Tag(owner=request.user, name=n, colour=f.instance.colour) tag.save() obj.tag(tag) return HttpResponse( '{{"obj":{}, "type":"{}"}}'.format(obj.to_json(), f.cleaned_data["type"]), content_type="application/json" ) @login_required @require_POST def untag(request): """ Untags a thing using a RemoveTagForm Returns a JSON response with a "deleted" and "key" porperty. "deleted" is the primary key of the object that had its tag removed, while "type" is it's type. If there is an error, a JSON object with an "error" key is returned instead. """ f = RemoveTagForm(request.POST) if not f.is_valid(): return HttpResponse('{"error":"Form invalid"}', content_type="application/json", status=422) taggable = tags.lookup_taggable(f.cleaned_data["type"]) if taggable is None: return HttpResponse('{"error":"Taggable type invalid"}', content_type="application/json", status=422) try: obj = get_object_or_404(taggable, owner=request.user, pk=f.cleaned_data["target_pk"]) except taggable.DoesNotExist: return HttpResponse('{"error":"Taggable not found"}', content_type="application/json", status=422) try: tag = Tag.objects.get(owner=request.user, pk=f.cleaned_data["tag_pk"]) except Tag.DoesNotExist: return HttpResponse('{"error":"Tag to remove not found"}', content_type="application/json", status=422) obj.tags.remove(tag) return HttpResponse( '{{"deleted":{}, "type":"{}"}}'.format(obj.pk, f.cleaned_data["type"]), content_type="application/json" ) @login_required @require_POST def restore(request): """ Given the undoable json representation of a bookmark, performs the undo, recovering the tag The tag must have been from "undoable_json", and must be the "obj" POST value. If it succeeds it returns a JSON object with "obj" being the JSON representation of the bookmark, "type" which is always "tag" and "id" which is the id of the newly created bookmark. """ if "obj" not in request.POST: raise SuspiciousOperation try: tag = Tag.from_undoable(request.POST.get("obj"), request.user) except Exception: raise SuspiciousOperation out = {} out["type"] = "tag" out["obj"] = tag.to_json() out["id"] = tag.pk return HttpResponse(json.dumps(out), content_type="application/json") @login_required @require_POST def delete(request): """ Deletes a tag The primary key of the tag must be specified with the "tag" POST value. If the tag doesn't exist, nothing happens. This returns a JSON object with the following properties: deleted: The primary key of the deleted tag, or null if it didn't exist. obj: The JSON representation of the deleted tag, or null if it didn't exist. alreadyDeleted: True if and only if the tag was deleted before this request. type: Always "tag". 
""" if "tag" not in request.POST: raise SuspiciousOperation try: tag = Tag.objects.get(owner=request.user, pk=request.POST["tag"]) except Tag.DoesNotExist: return HttpResponse( '{"deleted":null, "obj":null, "alreadyDeleted":true, "type":"tag"}', content_type="application/json" ) id = tag.pk json = tag.undoable_json() tag.delete() return HttpResponse( '{"deleted":'+str(id)+', "obj":'+json+', "alreadyDeleted":false, "type":"tag"}', content_type="application/json" ) @login_required def htmlBlock(request, tag): """ Outputs the HTML for a tag block, such as for the tags list page This uses the tagBlock.html temlpate. """ tag = get_object_or_404(Tag, pk=tag, owner=request.user) return TemplateResponse(request, "tags/tagBlock.html", tagBlock(tag)) @login_required @require_POST def rename(request, tag): """ Renames a tag using a RenameTagForm If successfull, outputs a JSON object with "obj", "type" and "pooled" properties. "obj" is the JSON object of the tag that was renamed, while "type" will always be "tag". pooled is true when the tag has been renamed to that of an existing tag, in which case "obj" will be that tag. If it fails, a JSON object with an "error" value will be returned. """ tagObj = get_object_or_404(Tag, owner=request.user, slug=tag) form = RenameTagForm(request.POST, instance=tagObj) if not form.is_valid(): return HttpResponse('{"error":"Form invalid"}', content_type="application/json", status=422) if Tag.objects.filter(owner=request.user, slug=makeslug(form.data["name"])).exclude(pk=form.instance.pk).exists(): # Tag exists, pool them existing = Tag.objects.get(owner=request.user, slug=makeslug(form.data["name"])) form.instance.pool_into(existing) form.instance.delete() tagObj = existing pooled = "true" else: form.save() pooled = "false" return HttpResponse('{"obj":'+tagObj.to_json()+', "type":"tag", "pooled":'+pooled+'}', content_type="application/json") @login_required @require_POST def pin(request, tag): """ Updates a tags pinning using a PinTagForm If successfull, outputs a JSON object with "obj" and "type" properties. "obj" is the JSON object of the tag that was renamed, while "type" will always be "tag". If it fails, a JSON object with an "error" value will be returned. """ tagObj = get_object_or_404(Tag, owner=request.user, slug=tag) form = PinTagForm(request.POST, instance=tagObj) if not form.is_valid(): return HttpResponse('{"error":"Form invalid"}', content_type="application/json", status=422) form.save() return HttpResponse('{"obj":'+tagObj.to_json()+', "type":"tag"}', content_type="application/json")
from dataclasses import field, Field, dataclass from enum import Enum from itertools import chain from typing import ( Any, List, Optional, Dict, Union, Type, TypeVar, Callable ) from dbt.dataclass_schema import ( dbtClassMixin, ValidationError, register_pattern, ) from dbt.contracts.graph.unparsed import AdditionalPropertiesAllowed from dbt.exceptions import InternalException, CompilationException from dbt.contracts.util import Replaceable, list_str from dbt import hooks from dbt.node_types import NodeType M = TypeVar('M', bound='Metadata') def _get_meta_value(cls: Type[M], fld: Field, key: str, default: Any) -> M: # a metadata field might exist. If it does, it might have a matching key. # If it has both, make sure the value is valid and return it. If it # doesn't, return the default. if fld.metadata: value = fld.metadata.get(key, default) else: value = default try: return cls(value) except ValueError as exc: raise InternalException( f'Invalid {cls} value: {value}' ) from exc def _set_meta_value( obj: M, key: str, existing: Optional[Dict[str, Any]] = None ) -> Dict[str, Any]: if existing is None: result = {} else: result = existing.copy() result.update({key: obj}) return result class Metadata(Enum): @classmethod def from_field(cls: Type[M], fld: Field) -> M: default = cls.default_field() key = cls.metadata_key() return _get_meta_value(cls, fld, key, default) def meta( self, existing: Optional[Dict[str, Any]] = None ) -> Dict[str, Any]: key = self.metadata_key() return _set_meta_value(self, key, existing) @classmethod def default_field(cls) -> 'Metadata': raise NotImplementedError('Not implemented') @classmethod def metadata_key(cls) -> str: raise NotImplementedError('Not implemented') class MergeBehavior(Metadata): Append = 1 Update = 2 Clobber = 3 @classmethod def default_field(cls) -> 'MergeBehavior': return cls.Clobber @classmethod def metadata_key(cls) -> str: return 'merge' class ShowBehavior(Metadata): Show = 1 Hide = 2 @classmethod def default_field(cls) -> 'ShowBehavior': return cls.Show @classmethod def metadata_key(cls) -> str: return 'show_hide' @classmethod def should_show(cls, fld: Field) -> bool: return cls.from_field(fld) == cls.Show class CompareBehavior(Metadata): Include = 1 Exclude = 2 @classmethod def default_field(cls) -> 'CompareBehavior': return cls.Include @classmethod def metadata_key(cls) -> str: return 'compare' @classmethod def should_include(cls, fld: Field) -> bool: return cls.from_field(fld) == cls.Include def metas(*metas: Metadata) -> Dict[str, Any]: existing: Dict[str, Any] = {} for m in metas: existing = m.meta(existing) return existing def _listify(value: Any) -> List: if isinstance(value, list): return value[:] else: return [value] def _merge_field_value( merge_behavior: MergeBehavior, self_value: Any, other_value: Any, ): if merge_behavior == MergeBehavior.Clobber: return other_value elif merge_behavior == MergeBehavior.Append: return _listify(self_value) + _listify(other_value) elif merge_behavior == MergeBehavior.Update: if not isinstance(self_value, dict): raise InternalException(f'expected dict, got {self_value}') if not isinstance(other_value, dict): raise InternalException(f'expected dict, got {other_value}') value = self_value.copy() value.update(other_value) return value else: raise InternalException( f'Got an invalid merge_behavior: {merge_behavior}' ) def insensitive_patterns(*patterns: str): lowercased = [] for pattern in patterns: lowercased.append( ''.join('[{}{}]'.format(s.upper(), s.lower()) for s in pattern) ) return 
'^({})$'.format('|'.join(lowercased)) class Severity(str): pass register_pattern(Severity, insensitive_patterns('warn', 'error')) @dataclass class Hook(dbtClassMixin, Replaceable): sql: str transaction: bool = True index: Optional[int] = None T = TypeVar('T', bound='BaseConfig') @dataclass class BaseConfig( AdditionalPropertiesAllowed, Replaceable ): # enable syntax like: config['key'] def __getitem__(self, key): return self.get(key) # like doing 'get' on a dictionary def get(self, key, default=None): if hasattr(self, key): return getattr(self, key) elif key in self._extra: return self._extra[key] else: return default # enable syntax like: config['key'] = value def __setitem__(self, key, value): if hasattr(self, key): setattr(self, key, value) else: self._extra[key] = value def __delitem__(self, key): if hasattr(self, key): msg = ( 'Error, tried to delete config key "{}": Cannot delete ' 'built-in keys' ).format(key) raise CompilationException(msg) else: del self._extra[key] def _content_iterator(self, include_condition: Callable[[Field], bool]): seen = set() for fld, _ in self._get_fields(): seen.add(fld.name) if include_condition(fld): yield fld.name for key in self._extra: if key not in seen: seen.add(key) yield key def __iter__(self): yield from self._content_iterator(include_condition=lambda f: True) def __len__(self): return len(self._get_fields()) + len(self._extra) @staticmethod def compare_key( unrendered: Dict[str, Any], other: Dict[str, Any], key: str, ) -> bool: if key not in unrendered and key not in other: return True elif key not in unrendered and key in other: return False elif key in unrendered and key not in other: return False else: return unrendered[key] == other[key] @classmethod def same_contents( cls, unrendered: Dict[str, Any], other: Dict[str, Any] ) -> bool: """This is like __eq__, except it ignores some fields.""" seen = set() for fld, target_name in cls._get_fields(): key = target_name seen.add(key) if CompareBehavior.should_include(fld): if not cls.compare_key(unrendered, other, key): return False for key in chain(unrendered, other): if key not in seen: seen.add(key) if not cls.compare_key(unrendered, other, key): return False return True # This is used in 'add_config_call' to created the combined config_call_dict. # 'meta' moved here from node mergebehavior = { "append": ['pre-hook', 'pre_hook', 'post-hook', 'post_hook', 'tags'], "update": ['quoting', 'column_types', 'meta'], } @classmethod def _merge_dicts( cls, src: Dict[str, Any], data: Dict[str, Any] ) -> Dict[str, Any]: """Find all the items in data that match a target_field on this class, and merge them with the data found in `src` for target_field, using the field's specified merge behavior. Matching items will be removed from `data` (but _not_ `src`!). Returns a dict with the merge results. That means this method mutates its input! Any remaining values in data were not merged. 
""" result = {} for fld, target_field in cls._get_fields(): if target_field not in data: continue data_attr = data.pop(target_field) if target_field not in src: result[target_field] = data_attr continue merge_behavior = MergeBehavior.from_field(fld) self_attr = src[target_field] result[target_field] = _merge_field_value( merge_behavior=merge_behavior, self_value=self_attr, other_value=data_attr, ) return result def update_from( self: T, data: Dict[str, Any], adapter_type: str, validate: bool = True ) -> T: """Given a dict of keys, update the current config from them, validate it, and return a new config with the updated values """ # sadly, this is a circular import from dbt.adapters.factory import get_config_class_by_name dct = self.to_dict(omit_none=False) adapter_config_cls = get_config_class_by_name(adapter_type) self_merged = self._merge_dicts(dct, data) dct.update(self_merged) adapter_merged = adapter_config_cls._merge_dicts(dct, data) dct.update(adapter_merged) # any remaining fields must be "clobber" dct.update(data) # any validation failures must have come from the update if validate: self.validate(dct) return self.from_dict(dct) def finalize_and_validate(self: T) -> T: dct = self.to_dict(omit_none=False) self.validate(dct) return self.from_dict(dct) def replace(self, **kwargs): dct = self.to_dict(omit_none=True) mapping = self.field_mapping() for key, value in kwargs.items(): new_key = mapping.get(key, key) dct[new_key] = value return self.from_dict(dct) @dataclass class SourceConfig(BaseConfig): enabled: bool = True @dataclass class NodeAndTestConfig(BaseConfig): enabled: bool = True # these fields are included in serialized output, but are not part of # config comparison (they are part of database_representation) alias: Optional[str] = field( default=None, metadata=CompareBehavior.Exclude.meta(), ) schema: Optional[str] = field( default=None, metadata=CompareBehavior.Exclude.meta(), ) database: Optional[str] = field( default=None, metadata=CompareBehavior.Exclude.meta(), ) tags: Union[List[str], str] = field( default_factory=list_str, metadata=metas(ShowBehavior.Hide, MergeBehavior.Append, CompareBehavior.Exclude), ) meta: Dict[str, Any] = field( default_factory=dict, metadata=MergeBehavior.Update.meta(), ) @dataclass class NodeConfig(NodeAndTestConfig): # Note: if any new fields are added with MergeBehavior, also update the # 'mergebehavior' dictionary materialized: str = 'view' persist_docs: Dict[str, Any] = field(default_factory=dict) post_hook: List[Hook] = field( default_factory=list, metadata=MergeBehavior.Append.meta(), ) pre_hook: List[Hook] = field( default_factory=list, metadata=MergeBehavior.Append.meta(), ) quoting: Dict[str, Any] = field( default_factory=dict, metadata=MergeBehavior.Update.meta(), ) # This is actually only used by seeds. Should it be available to others? # That would be a breaking change! 
column_types: Dict[str, Any] = field( default_factory=dict, metadata=MergeBehavior.Update.meta(), ) full_refresh: Optional[bool] = None on_schema_change: Optional[str] = 'ignore' @classmethod def __pre_deserialize__(cls, data): data = super().__pre_deserialize__(data) field_map = {'post-hook': 'post_hook', 'pre-hook': 'pre_hook'} # create a new dict because otherwise it gets overwritten in # tests new_dict = {} for key in data: new_dict[key] = data[key] data = new_dict for key in hooks.ModelHookType: if key in data: data[key] = [hooks.get_hook_dict(h) for h in data[key]] for field_name in field_map: if field_name in data: new_name = field_map[field_name] data[new_name] = data.pop(field_name) return data def __post_serialize__(self, dct): dct = super().__post_serialize__(dct) field_map = {'post_hook': 'post-hook', 'pre_hook': 'pre-hook'} for field_name in field_map: if field_name in dct: dct[field_map[field_name]] = dct.pop(field_name) return dct # this is still used by jsonschema validation @classmethod def field_mapping(cls): return {'post_hook': 'post-hook', 'pre_hook': 'pre-hook'} @dataclass class SeedConfig(NodeConfig): materialized: str = 'seed' quote_columns: Optional[bool] = None @dataclass class TestConfig(NodeAndTestConfig): # this is repeated because of a different default schema: Optional[str] = field( default='dbt_test__audit', metadata=CompareBehavior.Exclude.meta(), ) materialized: str = 'test' severity: Severity = Severity('ERROR') store_failures: Optional[bool] = None where: Optional[str] = None limit: Optional[int] = None fail_calc: str = 'count(*)' warn_if: str = '!= 0' error_if: str = '!= 0' @classmethod def same_contents( cls, unrendered: Dict[str, Any], other: Dict[str, Any] ) -> bool: """This is like __eq__, except it explicitly checks certain fields.""" modifiers = [ 'severity', 'where', 'limit', 'fail_calc', 'warn_if', 'error_if', 'store_failures' ] seen = set() for _, target_name in cls._get_fields(): key = target_name seen.add(key) if key in modifiers: if not cls.compare_key(unrendered, other, key): return False return True @dataclass class EmptySnapshotConfig(NodeConfig): materialized: str = 'snapshot' @dataclass class SnapshotConfig(EmptySnapshotConfig): strategy: Optional[str] = None unique_key: Optional[str] = None target_schema: Optional[str] = None target_database: Optional[str] = None updated_at: Optional[str] = None check_cols: Optional[Union[str, List[str]]] = None @classmethod def validate(cls, data): super().validate(data) if not data.get('strategy') or not data.get('unique_key') or not \ data.get('target_schema'): raise ValidationError( "Snapshots must be configured with a 'strategy', 'unique_key', " "and 'target_schema'.") if data.get('strategy') == 'check': if not data.get('check_cols'): raise ValidationError( "A snapshot configured with the check strategy must " "specify a check_cols configuration.") if (isinstance(data['check_cols'], str) and data['check_cols'] != 'all'): raise ValidationError( f"Invalid value for 'check_cols': {data['check_cols']}. 
" "Expected 'all' or a list of strings.") elif data.get('strategy') == 'timestamp': if not data.get('updated_at'): raise ValidationError( "A snapshot configured with the timestamp strategy " "must specify an updated_at configuration.") if data.get('check_cols'): raise ValidationError( "A 'timestamp' snapshot should not have 'check_cols'") # If the strategy is not 'check' or 'timestamp' it's a custom strategy, # formerly supported with GenericSnapshotConfig def finalize_and_validate(self): data = self.to_dict(omit_none=True) self.validate(data) return self.from_dict(data) RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = { NodeType.Source: SourceConfig, NodeType.Seed: SeedConfig, NodeType.Test: TestConfig, NodeType.Model: NodeConfig, NodeType.Snapshot: SnapshotConfig, } # base resource types are like resource types, except nothing has mandatory # configs. BASE_RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = RESOURCE_TYPES.copy() BASE_RESOURCE_TYPES.update({ NodeType.Snapshot: EmptySnapshotConfig }) def get_config_for(resource_type: NodeType, base=False) -> Type[BaseConfig]: if base: lookup = BASE_RESOURCE_TYPES else: lookup = RESOURCE_TYPES return lookup.get(resource_type, NodeConfig)
# coding: utf-8 """ AlertApi.py Copyright 2016 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import sys import os # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class AlertApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def create_alert_from_parts(self, name, condition, minutes, notifications, severity, **kwargs): """ Create an alert This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_alert_from_parts(name, condition, minutes, notifications, severity, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: Descriptive name for the alert (required) :param str condition: A query that will trigger the alert if non-zero results are observed for given number of minutes (required) :param int minutes: Number of minutes for the query to return non-zero results before the alert fires. Must be 2 or higher (required) :param str notifications: Up to ten addresses can be listed, separated by commas. Notifications will be sent to all targets on the list. To trigger a PagerDuty incident, specify a \"pd:key\" target with the 32-digit hex API key you created. PagerDuty incidents will be automatically triggered, updated, and resolved. (required) :param str severity: Severity (required) :param str display_expression: An optional query that will be shown when the alert fires. Use this to show a more helpful chart, e.g. the underlying timeseries :param int resolve_minutes: Number of minutes for the query to return 0 as a result before the alert resolves. Defaults to the same as minutes to fire if not set. Must be 2 or higher :param str private_tags: Comma separated list of private tags to be associated with this alert :param str shared_tags: Comma separated list of shared tags to be associated with this alert :param str additional_information: Any additional information to be included with this alert :return: Alert If the method is called asynchronously, returns the request thread. 
""" all_params = ['name', 'condition', 'minutes', 'notifications', 'severity', 'display_expression', 'resolve_minutes', 'private_tags', 'shared_tags', 'additional_information'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_alert_from_parts" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `create_alert_from_parts`") # verify the required parameter 'condition' is set if ('condition' not in params) or (params['condition'] is None): raise ValueError("Missing the required parameter `condition` when calling `create_alert_from_parts`") # verify the required parameter 'minutes' is set if ('minutes' not in params) or (params['minutes'] is None): raise ValueError("Missing the required parameter `minutes` when calling `create_alert_from_parts`") # verify the required parameter 'notifications' is set if ('notifications' not in params) or (params['notifications'] is None): raise ValueError("Missing the required parameter `notifications` when calling `create_alert_from_parts`") # verify the required parameter 'severity' is set if ('severity' not in params) or (params['severity'] is None): raise ValueError("Missing the required parameter `severity` when calling `create_alert_from_parts`") resource_path = '/api/alert/create'.replace('{format}', 'json') path_params = {} query_params = {} header_params = {} form_params = [] local_var_files = {} if 'name' in params: form_params.append(('name', params['name'])) if 'condition' in params: form_params.append(('condition', params['condition'])) if 'display_expression' in params: form_params.append(('displayExpression', params['display_expression'])) if 'minutes' in params: form_params.append(('minutes', params['minutes'])) if 'resolve_minutes' in params: form_params.append(('resolveMinutes', params['resolve_minutes'])) if 'notifications' in params: form_params.append(('notifications', params['notifications'])) if 'severity' in params: form_params.append(('severity', params['severity'])) if 'private_tags' in params: form_params.append(('privateTags', params['private_tags'])) if 'shared_tags' in params: form_params.append(('sharedTags', params['shared_tags'])) if 'additional_information' in params: form_params.append(('additionalInformation', params['additional_information'])) body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/x-www-form-urlencoded']) # Authentication setting auth_settings = ['api_key'] response = self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Alert', auth_settings=auth_settings, callback=params.get('callback')) return response def get_active_alerts(self, **kwargs): """ Get Active Alerts Return all firing alerts This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. 
>>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_active_alerts(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] customer_tag: Restrict result to alerts with this shared tag :param list[str] user_tag: Restrict result to alerts with this private tag :return: list[Alert] If the method is called asynchronously, returns the request thread. """ all_params = ['customer_tag', 'user_tag'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_active_alerts" % key ) params[key] = val del params['kwargs'] resource_path = '/api/alert/active'.replace('{format}', 'json') path_params = {} query_params = {} if 'customer_tag' in params: query_params['customerTag'] = params['customer_tag'] if 'user_tag' in params: query_params['userTag'] = params['user_tag'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type([]) # Authentication setting auth_settings = ['api_key'] response = self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[Alert]', auth_settings=auth_settings, callback=params.get('callback')) return response def get_alert(self, **kwargs): """ Retrieve a list of alerts for a particular view. (Deprecated: Retrieve a single alert by its id (creation time)) This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_alert(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int created: (Deprecated) :param str view: :param list[str] customer_tag: Restrict result to alerts with this shared tag :param list[str] user_tag: Restrict result to alerts with this private tag :return: Alert If the method is called asynchronously, returns the request thread. 
""" all_params = ['created', 'view', 'customer_tag', 'user_tag'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_alert" % key ) params[key] = val del params['kwargs'] resource_path = '/api/alert'.replace('{format}', 'json') path_params = {} query_params = {} if 'created' in params: query_params['created'] = params['created'] if 'view' in params: query_params['view'] = params['view'] if 'customer_tag' in params: query_params['customerTag'] = params['customer_tag'] if 'user_tag' in params: query_params['userTag'] = params['user_tag'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type([]) # Authentication setting auth_settings = ['api_key'] response = self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Alert', auth_settings=auth_settings, callback=params.get('callback')) return response def get_alerts(self, **kwargs): """ Get All Alerts Return all alerts This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_alerts(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] customer_tag: Restrict result to alerts with this shared tag :param list[str] user_tag: Restrict result to alerts with this private tag :return: list[Alert] If the method is called asynchronously, returns the request thread. """ all_params = ['customer_tag', 'user_tag'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_alerts" % key ) params[key] = val del params['kwargs'] resource_path = '/api/alert/all'.replace('{format}', 'json') path_params = {} query_params = {} if 'customer_tag' in params: query_params['customerTag'] = params['customer_tag'] if 'user_tag' in params: query_params['userTag'] = params['user_tag'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type([]) # Authentication setting auth_settings = ['api_key'] response = self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[Alert]', auth_settings=auth_settings, callback=params.get('callback')) return response def get_alerts_affected_by_maintenance(self, **kwargs): """ Get In Maintenance Alerts Return all alerts currently in a maintenance window This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_alerts_affected_by_maintenance(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] customer_tag: Restrict result to alerts with this shared tag :param list[str] user_tag: Restrict result to alerts with this private tag :return: list[Alert] If the method is called asynchronously, returns the request thread. """ all_params = ['customer_tag', 'user_tag'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_alerts_affected_by_maintenance" % key ) params[key] = val del params['kwargs'] resource_path = '/api/alert/affected_by_maintenance'.replace('{format}', 'json') path_params = {} query_params = {} if 'customer_tag' in params: query_params['customerTag'] = params['customer_tag'] if 'user_tag' in params: query_params['userTag'] = params['user_tag'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type([]) # Authentication setting auth_settings = ['api_key'] response = self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[Alert]', auth_settings=auth_settings, callback=params.get('callback')) return response def get_invalid_alerts(self, **kwargs): """ Get Invalid Alerts Return all alerts that have an invalid query This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_invalid_alerts(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] customer_tag: Restrict result to alerts with this shared tag :param list[str] user_tag: Restrict result to alerts with this private tag :return: list[Alert] If the method is called asynchronously, returns the request thread. 
""" all_params = ['customer_tag', 'user_tag'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_invalid_alerts" % key ) params[key] = val del params['kwargs'] resource_path = '/api/alert/invalid'.replace('{format}', 'json') path_params = {} query_params = {} if 'customer_tag' in params: query_params['customerTag'] = params['customer_tag'] if 'user_tag' in params: query_params['userTag'] = params['user_tag'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type([]) # Authentication setting auth_settings = ['api_key'] response = self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[Alert]', auth_settings=auth_settings, callback=params.get('callback')) return response def get_snoozed_alerts(self, **kwargs): """ Get Snoozed Alerts Return all snoozed alerts This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_snoozed_alerts(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] customer_tag: Restrict result to alerts with this shared tag :param list[str] user_tag: Restrict result to alerts with this private tag :return: list[Alert] If the method is called asynchronously, returns the request thread. """ all_params = ['customer_tag', 'user_tag'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_snoozed_alerts" % key ) params[key] = val del params['kwargs'] resource_path = '/api/alert/snoozed'.replace('{format}', 'json') path_params = {} query_params = {} if 'customer_tag' in params: query_params['customerTag'] = params['customer_tag'] if 'user_tag' in params: query_params['userTag'] = params['user_tag'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type([]) # Authentication setting auth_settings = ['api_key'] response = self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[Alert]', auth_settings=auth_settings, callback=params.get('callback')) return response def get_specific_alert(self, created, **kwargs): """ Retrieve a single alert by its id (creation time) This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. 
>>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_specific_alert(created, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int created: (required) :return: Alert If the method is called asynchronously, returns the request thread. """ all_params = ['created'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_specific_alert" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'created' is set if ('created' not in params) or (params['created'] is None): raise ValueError("Missing the required parameter `created` when calling `get_specific_alert`") resource_path = '/api/alert/{created}'.replace('{format}', 'json') path_params = {} if 'created' in params: path_params['created'] = params['created'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type([]) # Authentication setting auth_settings = ['api_key'] response = self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Alert', auth_settings=auth_settings, callback=params.get('callback')) return response def update_alert_from_parts(self, alert_id, **kwargs): """ Update an alert This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_alert_from_parts(alert_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int alert_id: (required) :param str name: Descriptive name for the alert :param str condition: A query that will trigger the alert if non-zero results are observed for given number of minutes :param str display_expression: An optional query that will be shown when the alert fires. Use this to show a more helpful chart, e.g. the underlying timeseries :param int minutes: Number of minutes for the query to return non-zero results before the alert fires. Must be 2 or higher :param int resolve_minutes: Number of minutes for the query to return 0 as a result before the alert resolves. Defaults to the same as minutes to fire if not set. Must be 2 or higher :param str notifications: Up to ten addresses can be listed, separated by commas. Notifications will be sent to all targets on the list. To trigger a PagerDuty incident, specify a \"pd:key\" target with the 32-digit hex API key you created. PagerDuty incidents will be automatically triggered, updated, and resolved. :param str severity: Severity :param str private_tags: Comma separated list of private tags to be associated with this alert :param str shared_tags: Comma separated list of shared tags to be associated with this alert :param str additional_information: Any additional information to be included with this alert :return: Alert If the method is called asynchronously, returns the request thread. 
""" all_params = ['alert_id', 'name', 'condition', 'display_expression', 'minutes', 'resolve_minutes', 'notifications', 'severity', 'private_tags', 'shared_tags', 'additional_information'] all_params.append('callback') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method update_alert_from_parts" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'alert_id' is set if ('alert_id' not in params) or (params['alert_id'] is None): raise ValueError("Missing the required parameter `alert_id` when calling `update_alert_from_parts`") resource_path = '/api/alert/{alertId}'.replace('{format}', 'json') path_params = {} if 'alert_id' in params: path_params['alertId'] = params['alert_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} if 'name' in params: form_params.append(('name', params['name'])) if 'condition' in params: form_params.append(('condition', params['condition'])) if 'display_expression' in params: form_params.append(('displayExpression', params['display_expression'])) if 'minutes' in params: form_params.append(('minutes', params['minutes'])) if 'resolve_minutes' in params: form_params.append(('resolveMinutes', params['resolve_minutes'])) if 'notifications' in params: form_params.append(('notifications', params['notifications'])) if 'severity' in params: form_params.append(('severity', params['severity'])) if 'private_tags' in params: form_params.append(('privateTags', params['private_tags'])) if 'shared_tags' in params: form_params.append(('sharedTags', params['shared_tags'])) if 'additional_information' in params: form_params.append(('additionalInformation', params['additional_information'])) body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/x-www-form-urlencoded']) # Authentication setting auth_settings = ['api_key'] response = self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Alert', auth_settings=auth_settings, callback=params.get('callback')) return response
# -*- coding: utf-8 -*- from datetime import date, datetime from dateutil.relativedelta import relativedelta from openerp import api, fields, models, _ from openerp.exceptions import UserError, ValidationError from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF from openerp.tools import float_compare class AccountAssetCategory(models.Model): _name = 'account.asset.category' _description = 'Asset category' active = fields.Boolean(default=True) name = fields.Char(required=True, index=True, string="Asset Type") account_analytic_id = fields.Many2one('account.analytic.account', string='Analytic Account', domain=[('account_type', '=', 'normal')]) account_asset_id = fields.Many2one('account.account', string='Asset Account', required=True, domain=[('internal_type','=','other'), ('deprecated', '=', False)]) account_income_recognition_id = fields.Many2one('account.account', string='Recognition Income Account', domain=[('internal_type','=','other'), ('deprecated', '=', False)], oldname='account_expense_depreciation_id') account_depreciation_id = fields.Many2one('account.account', string='Depreciation Account', required=True, domain=[('internal_type','=','other'), ('deprecated', '=', False)]) journal_id = fields.Many2one('account.journal', string='Journal', required=True) company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env['res.company']._company_default_get('account.asset.category')) method = fields.Selection([('linear', 'Linear'), ('degressive', 'Degressive')], string='Computation Method', required=True, default='linear', help="Choose the method to use to compute the amount of depreciation lines.\n" " * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" " * Degressive: Calculated on basis of: Residual Value * Degressive Factor") method_number = fields.Integer(string='Number of Depreciations', default=5, help="The number of depreciations needed to depreciate your asset") method_period = fields.Integer(string='Period Length', default=1, help="State here the time between 2 depreciations, in months", required=True) method_progress_factor = fields.Float('Degressive Factor', default=0.3) method_time = fields.Selection([('number', 'Number of Depreciations'), ('end', 'Ending Date')], string='Time Method', required=True, default='number', help="Choose the method to use to compute the dates and number of depreciation lines.\n" " * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" " * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond.") method_end = fields.Date('Ending date') prorata = fields.Boolean(string='Prorata Temporis', help='Indicates that the first depreciation entry for this asset have to be done from the purchase date instead of the first of January') open_asset = fields.Boolean(string='Post Journal Entries', help="Check this if you want to automatically confirm the assets of this category when created by invoices.") type = fields.Selection([('sale', 'Sale: Revenue Recognition'), ('purchase', 'Purchase: Asset')], required=True, index=True, default='purchase') @api.onchange('type') def onchange_type(self): if self.type == 'sale': self.prorata = True self.method_period = 1 else: self.method_period = 12 class AccountAssetAsset(models.Model): _name = 'account.asset.asset' _description = 'Asset/Revenue Recognition' _inherit = ['mail.thread', 'ir.needaction_mixin'] account_move_ids = fields.One2many('account.move', 
'asset_id', string='Entries', readonly=True, states={'draft': [('readonly', False)]}) entry_count = fields.Integer(compute='_entry_count', string='# Asset Entries') name = fields.Char(string='Asset Name', required=True, readonly=True, states={'draft': [('readonly', False)]}) code = fields.Char(string='Reference', size=32, readonly=True, states={'draft': [('readonly', False)]}) value = fields.Float(string='Gross Value', required=True, readonly=True, digits=0, states={'draft': [('readonly', False)]}, oldname='purchase_value') currency_id = fields.Many2one('res.currency', string='Currency', required=True, readonly=True, states={'draft': [('readonly', False)]}, default=lambda self: self.env.user.company_id.currency_id.id) company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True, states={'draft': [('readonly', False)]}, default=lambda self: self.env['res.company']._company_default_get('account.asset.asset')) note = fields.Text() category_id = fields.Many2one('account.asset.category', string='Category', required=True, change_default=True, readonly=True, states={'draft': [('readonly', False)]}) date = fields.Date(string='Date', required=True, readonly=True, states={'draft': [('readonly', False)]}, default=fields.Date.context_today, oldname="purchase_date") state = fields.Selection([('draft', 'Draft'), ('open', 'Running'), ('close', 'Close')], 'Status', required=True, copy=False, default='draft', help="When an asset is created, the status is 'Draft'.\n" "If the asset is confirmed, the status goes in 'Running' and the depreciation lines can be posted in the accounting.\n" "You can manually close an asset when the depreciation is over. If the last line of depreciation is posted, the asset automatically goes in that status.") active = fields.Boolean(default=True) partner_id = fields.Many2one('res.partner', string='Partner', readonly=True, states={'draft': [('readonly', False)]}) method = fields.Selection([('linear', 'Linear'), ('degressive', 'Degressive')], string='Computation Method', required=True, readonly=True, states={'draft': [('readonly', False)]}, default='linear', help="Choose the method to use to compute the amount of depreciation lines.\n * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" " * Degressive: Calculated on basis of: Residual Value * Degressive Factor") method_number = fields.Integer(string='Number of Depreciations', readonly=True, states={'draft': [('readonly', False)]}, default=5, help="The number of depreciations needed to depreciate your asset") method_period = fields.Integer(string='Number of Months in a Period', required=True, readonly=True, default=12, states={'draft': [('readonly', False)]}, help="The amount of time between two depreciations, in months") method_end = fields.Date(string='Ending Date', readonly=True, states={'draft': [('readonly', False)]}) method_progress_factor = fields.Float(string='Degressive Factor', readonly=True, default=0.3, states={'draft': [('readonly', False)]}) value_residual = fields.Float(compute='_amount_residual', method=True, digits=0, string='Residual Value') method_time = fields.Selection([('number', 'Number of Depreciations'), ('end', 'Ending Date')], string='Time Method', required=True, readonly=True, default='number', states={'draft': [('readonly', False)]}, help="Choose the method to use to compute the dates and number of depreciation lines.\n" " * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" " * Ending Date: Choose the 
time between 2 depreciations and the date the depreciations won't go beyond.") prorata = fields.Boolean(string='Prorata Temporis', readonly=True, states={'draft': [('readonly', False)]}, help='Indicates that the first depreciation entry for this asset have to be done from the purchase date instead of the first January / Start date of fiscal year') depreciation_line_ids = fields.One2many('account.asset.depreciation.line', 'asset_id', string='Depreciation Lines', readonly=True, states={'draft': [('readonly', False)], 'open': [('readonly', False)]}) salvage_value = fields.Float(string='Salvage Value', digits=0, readonly=True, states={'draft': [('readonly', False)]}, help="It is the amount you plan to have that you cannot depreciate.") invoice_id = fields.Many2one('account.invoice', string='Invoice', states={'draft': [('readonly', False)]}, copy=False) type = fields.Selection(related="category_id.type", string='Type', required=True) @api.multi def unlink(self): for asset in self: if asset.state in ['open', 'close']: raise UserError(_('You cannot delete a document is in %s state.') % (asset.state,)) if asset.account_move_ids: raise UserError(_('You cannot delete a document that contains posted entries.')) return super(AccountAssetAsset, self).unlink() @api.multi def _get_last_depreciation_date(self): """ @param id: ids of a account.asset.asset objects @return: Returns a dictionary of the effective dates of the last depreciation entry made for given asset ids. If there isn't any, return the purchase date of this asset """ self.env.cr.execute(""" SELECT a.id as id, COALESCE(MAX(m.date),a.date) AS date FROM account_asset_asset a LEFT JOIN account_move m ON (m.asset_id = a.id) WHERE a.id IN %s GROUP BY a.id, a.date """, (tuple(self.ids),)) result = dict(self.env.cr.fetchall()) return result @api.model def _cron_generate_entries(self): assets = self.env['account.asset.asset'].search([('state', '=', 'open')]) assets._compute_entries(datetime.today()) def _compute_board_amount(self, sequence, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date): amount = 0 if sequence == undone_dotation_number: amount = residual_amount else: if self.method == 'linear': amount = amount_to_depr / (undone_dotation_number - len(posted_depreciation_line_ids)) if self.prorata and self.category_id.type == 'purchase': amount = amount_to_depr / self.method_number if sequence == 1: days = (self.company_id.compute_fiscalyear_dates(depreciation_date)['date_to'] - depreciation_date).days + 1 amount = (amount_to_depr / self.method_number) / total_days * days elif self.method == 'degressive': amount = residual_amount * self.method_progress_factor if self.prorata: if sequence == 1: days = (self.company_id.compute_fiscalyear_dates(depreciation_date)['date_to'] - depreciation_date).days + 1 amount = (residual_amount * self.method_progress_factor) / total_days * days return amount def _compute_board_undone_dotation_nb(self, depreciation_date, total_days): undone_dotation_number = self.method_number if self.method_time == 'end': end_date = datetime.strptime(self.method_end, DF).date() undone_dotation_number = 0 while depreciation_date <= end_date: depreciation_date = date(depreciation_date.year, depreciation_date.month, depreciation_date.day) + relativedelta(months=+self.method_period) undone_dotation_number += 1 if self.prorata and self.category_id.type == 'purchase': undone_dotation_number += 1 return undone_dotation_number @api.multi def compute_depreciation_board(self): 
self.ensure_one() posted_depreciation_line_ids = self.depreciation_line_ids.filtered(lambda x: x.move_check).sorted(key=lambda l: l.depreciation_date) unposted_depreciation_line_ids = self.depreciation_line_ids.filtered(lambda x: not x.move_check) # Remove old unposted depreciation lines. We cannot use unlink() with One2many field commands = [(2, line_id.id, False) for line_id in unposted_depreciation_line_ids] if self.value_residual != 0.0: amount_to_depr = residual_amount = self.value_residual if self.prorata: depreciation_date = datetime.strptime(self._get_last_depreciation_date()[self.id], DF).date() else: # depreciation_date = 1st of January of purchase year if annual valuation, 1st of # purchase month in other cases if self.method_period >= 12: asset_date = datetime.strptime(self.date[:4] + '-01-01', DF).date() else: asset_date = datetime.strptime(self.date[:7] + '-01', DF).date() # if we already have some previous validated entries, starting date isn't 1st January but last entry + method period if posted_depreciation_line_ids and posted_depreciation_line_ids[-1].depreciation_date: last_depreciation_date = datetime.strptime(posted_depreciation_line_ids[-1].depreciation_date, DF).date() depreciation_date = last_depreciation_date + relativedelta(months=+self.method_period) else: depreciation_date = asset_date day = depreciation_date.day month = depreciation_date.month year = depreciation_date.year total_days = (year % 4) and 365 or 366 undone_dotation_number = self._compute_board_undone_dotation_nb(depreciation_date, total_days) for x in range(len(posted_depreciation_line_ids), undone_dotation_number): sequence = x + 1 amount = self._compute_board_amount(sequence, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date) amount = self.currency_id.round(amount) residual_amount -= amount vals = { 'amount': amount, 'asset_id': self.id, 'sequence': sequence, 'name': (self.code or '') + '/' + str(sequence), 'remaining_value': residual_amount, 'depreciated_value': self.value - (self.salvage_value + residual_amount), 'depreciation_date': depreciation_date.strftime(DF), } commands.append((0, False, vals)) # Considering Depr. Period as months depreciation_date = date(year, month, day) + relativedelta(months=+self.method_period) day = depreciation_date.day month = depreciation_date.month year = depreciation_date.year self.write({'depreciation_line_ids': commands}) return True @api.multi def validate(self): self.write({'state': 'open'}) fields = [ 'method', 'method_number', 'method_period', 'method_end', 'method_progress_factor', 'method_time', 'salvage_value', 'invoice_id', ] ref_tracked_fields = self.env['account.asset.asset'].fields_get(fields) for asset in self: tracked_fields = ref_tracked_fields.copy() if asset.method == 'linear': del(tracked_fields['method_progress_factor']) if asset.method_time != 'end': del(tracked_fields['method_end']) else: del(tracked_fields['method_number']) dummy, tracking_value_ids = asset._message_track(tracked_fields, dict.fromkeys(fields)) asset.message_post(subject=_('Asset created'), tracking_value_ids=tracking_value_ids) @api.multi def set_to_close(self): move_ids = [] for asset in self: unposted_depreciation_line_ids = asset.depreciation_line_ids.filtered(lambda x: not x.move_check) if unposted_depreciation_line_ids: old_values = { 'method_end': asset.method_end, 'method_number': asset.method_number, } # Remove all unposted depr. 
lines commands = [(2, line_id.id, False) for line_id in unposted_depreciation_line_ids] # Create a new depr. line with the residual amount and post it sequence = len(asset.depreciation_line_ids) - len(unposted_depreciation_line_ids) + 1 today = datetime.today().strftime(DF) vals = { 'amount': asset.value_residual, 'asset_id': asset.id, 'sequence': sequence, 'name': (asset.code or '') + '/' + str(sequence), 'remaining_value': 0, 'depreciated_value': asset.value - asset.salvage_value, # the asset is completely depreciated 'depreciation_date': today, } commands.append((0, False, vals)) asset.write({'depreciation_line_ids': commands, 'method_end': today, 'method_number': sequence}) tracked_fields = self.env['account.asset.asset'].fields_get(['method_number', 'method_end']) changes, tracking_value_ids = asset._message_track(tracked_fields, old_values) if changes: asset.message_post(subject=_('Asset sold or disposed. Accounting entry awaiting for validation.'), tracking_value_ids=tracking_value_ids) move_ids += asset.depreciation_line_ids[-1].create_move(post_move=False) if move_ids: name = _('Disposal Move') view_mode = 'form' if len(move_ids) > 1: name = _('Disposal Moves') view_mode = 'tree,form' return { 'name': name, 'view_type': 'form', 'view_mode': view_mode, 'res_model': 'account.move', 'type': 'ir.actions.act_window', 'target': 'current', 'res_id': move_ids[0], } @api.multi def set_to_draft(self): self.write({'state': 'draft'}) @api.one @api.depends('value', 'salvage_value', 'depreciation_line_ids.move_check', 'depreciation_line_ids.amount') def _amount_residual(self): total_amount = 0.0 for line in self.depreciation_line_ids: if line.move_check: total_amount += line.amount self.value_residual = self.value - total_amount - self.salvage_value @api.onchange('company_id') def onchange_company_id(self): self.currency_id = self.company_id.currency_id.id @api.multi @api.depends('account_move_ids') def _entry_count(self): for asset in self: asset.entry_count = self.env['account.move'].search_count([('asset_id', '=', asset.id)]) @api.one @api.constrains('prorata', 'method_time') def _check_prorata(self): if self.prorata and self.method_time != 'number': raise ValidationError(_('Prorata temporis can be applied only for time method "number of depreciations".')) @api.onchange('category_id') def onchange_category_id(self): vals = self.onchange_category_id_values(self.category_id.id) # We cannot use 'write' on an object that doesn't exist yet if vals: for k, v in vals['value'].iteritems(): setattr(self, k, v) def onchange_category_id_values(self, category_id): if category_id: category = self.env['account.asset.category'].browse(category_id) return { 'value': { 'method': category.method, 'method_number': category.method_number, 'method_time': category.method_time, 'method_period': category.method_period, 'method_progress_factor': category.method_progress_factor, 'method_end': category.method_end, 'prorata': category.prorata, } } @api.onchange('method_time') def onchange_method_time(self): if self.method_time != 'number': self.prorata = False @api.multi def copy_data(self, default=None): if default is None: default = {} default['name'] = self.name + _(' (copy)') return super(AccountAssetAsset, self).copy_data(default)[0] @api.multi def _compute_entries(self, date): depreciation_ids = self.env['account.asset.depreciation.line'].search([ ('asset_id', 'in', self.ids), ('depreciation_date', '<=', date), ('move_check', '=', False)]) return depreciation_ids.create_move() @api.model def create(self, vals): 
asset = super(AccountAssetAsset, self.with_context(mail_create_nolog=True)).create(vals) asset.compute_depreciation_board() return asset @api.multi def write(self, vals): res = super(AccountAssetAsset, self).write(vals) if 'depreciation_line_ids' not in vals and 'state' not in vals: for rec in self: rec.compute_depreciation_board() return res @api.multi def open_entries(self): return { 'name': _('Journal Entries'), 'view_type': 'form', 'view_mode': 'tree,form', 'res_model': 'account.move', 'view_id': False, 'type': 'ir.actions.act_window', 'context': dict(self.env.context or {}, search_default_asset_id=self.id, default_asset_id=self.id), } class AccountAssetDepreciationLine(models.Model): _name = 'account.asset.depreciation.line' _description = 'Asset depreciation line' name = fields.Char(string='Depreciation Name', required=True, index=True) sequence = fields.Integer(required=True) asset_id = fields.Many2one('account.asset.asset', string='Asset', required=True, ondelete='cascade') parent_state = fields.Selection(related='asset_id.state', string='State of Asset') amount = fields.Float(string='Current Depreciation', digits=0, required=True) remaining_value = fields.Float(string='Next Period Depreciation', digits=0, required=True) depreciated_value = fields.Float(string='Cumulative Depreciation', required=True) depreciation_date = fields.Date('Depreciation Date', index=True) move_id = fields.Many2one('account.move', string='Depreciation Entry') move_check = fields.Boolean(compute='_get_move_check', string='Posted', track_visibility='always', store=True) @api.one @api.depends('move_id') def _get_move_check(self): self.move_check = bool(self.move_id) @api.multi def create_move(self, post_move=True): created_moves = self.env['account.move'] for line in self: depreciation_date = self.env.context.get('depreciation_date') or line.depreciation_date or fields.Date.context_today(self) company_currency = line.asset_id.company_id.currency_id current_currency = line.asset_id.currency_id amount = current_currency.compute(line.amount, company_currency) sign = (line.asset_id.category_id.journal_id.type == 'purchase' or line.asset_id.category_id.journal_id.type == 'sale' and 1) or -1 asset_name = line.asset_id.name + ' (%s/%s)' % (line.sequence, line.asset_id.method_number) reference = line.asset_id.code journal_id = line.asset_id.category_id.journal_id.id partner_id = line.asset_id.partner_id.id categ_type = line.asset_id.category_id.type debit_account = line.asset_id.category_id.account_asset_id.id credit_account = line.asset_id.category_id.account_depreciation_id.id prec = self.env['decimal.precision'].precision_get('Account') move_line_1 = { 'name': asset_name, 'account_id': credit_account, 'debit': 0.0 if float_compare(amount, 0.0, precision_digits=prec) > 0 else -amount, 'credit': amount if float_compare(amount, 0.0, precision_digits=prec) > 0 else 0.0, 'journal_id': journal_id, 'partner_id': partner_id, 'currency_id': company_currency != current_currency and current_currency.id or False, 'amount_currency': company_currency != current_currency and - sign * line.amount or 0.0, 'analytic_account_id': line.asset_id.category_id.account_analytic_id.id if categ_type == 'sale' else False, 'date': depreciation_date, } move_line_2 = { 'name': asset_name, 'account_id': debit_account, 'credit': 0.0 if float_compare(amount, 0.0, precision_digits=prec) > 0 else -amount, 'debit': amount if float_compare(amount, 0.0, precision_digits=prec) > 0 else 0.0, 'journal_id': journal_id, 'partner_id': partner_id, 
'currency_id': company_currency != current_currency and current_currency.id or False, 'amount_currency': company_currency != current_currency and sign * line.amount or 0.0, 'analytic_account_id': line.asset_id.category_id.account_analytic_id.id if categ_type == 'purchase' else False, 'date': depreciation_date, } move_vals = { 'ref': reference, 'date': depreciation_date or False, 'journal_id': line.asset_id.category_id.journal_id.id, 'line_ids': [(0, 0, move_line_1), (0, 0, move_line_2)], 'asset_id': line.asset_id.id, } move = self.env['account.move'].create(move_vals) line.write({'move_id': move.id, 'move_check': True}) created_moves |= move if post_move and created_moves: created_moves.filtered(lambda r: r.asset_id and r.asset_id.category_id and r.asset_id.category_id.open_asset).post() return [x.id for x in created_moves] @api.multi def post_lines_and_close_asset(self): # we re-evaluate the assets to determine whether we can close them for line in self: line.log_message_when_posted() asset = line.asset_id if asset.currency_id.is_zero(asset.value_residual): asset.message_post(body=_("Document closed.")) asset.write({'state': 'close'}) @api.multi def log_message_when_posted(self): def _format_message(message_description, tracked_values): message = '' if message_description: message = '<span>%s</span>' % message_description for name, values in tracked_values.iteritems(): message += '<div> &nbsp; &nbsp; &bull; <b>%s</b>: ' % name message += '%s</div>' % values return message for line in self: if line.move_id and line.move_id.state == 'draft': partner_name = line.asset_id.partner_id.name currency_name = line.asset_id.currency_id.name msg_values = {_('Currency'): currency_name, _('Amount'): line.amount} if partner_name: msg_values[_('Partner')] = partner_name msg = _format_message(_('Depreciation line posted.'), msg_values) line.asset_id.message_post(body=msg) @api.multi def unlink(self): for record in self: if record.move_check: if record.asset_id.category_id.type == 'purchase': msg = _("You cannot delete posted depreciation lines.") else: msg = _("You cannot delete posted installment lines.") raise UserError(msg) return super(AccountAssetDepreciationLine, self).unlink() class AccountMove(models.Model): _inherit = 'account.move' asset_id = fields.Many2one('account.asset.asset', string='Asset', ondelete="restrict") @api.multi def post(self): for move in self: if move.asset_id: move.asset_id.depreciation_line_ids.post_lines_and_close_asset() return super(AccountMove, self).post()
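# --- Illustrative sketch (plain Python, not Odoo ORM code) ---
# compute_depreciation_board()/_compute_board_amount() above spread the amount
# to depreciate evenly over the remaining periods in 'linear' mode, with the
# final line absorbing whatever residual is left after rounding. The helper
# below reproduces just that arithmetic for a new asset with no posted lines
# and no prorata; the function name and values are invented for illustration.
def linear_depreciation_sketch(gross_value, salvage_value, method_number, rounding=2):
    residual = gross_value - salvage_value
    amounts = []
    for sequence in range(1, method_number + 1):
        if sequence == method_number:
            amount = residual                  # last line takes the remainder
        else:
            amount = round((gross_value - salvage_value) / method_number, rounding)
        residual = round(residual - amount, rounding)
        amounts.append(round(amount, rounding))
    return amounts

# linear_depreciation_sketch(5000.0, 500.0, 5) -> [900.0, 900.0, 900.0, 900.0, 900.0]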
# Copyright (C) 2013-2014 Computer Sciences Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -*- coding: utf-8 -*- """ Created on Mon Mar 24 08:03:20 2014 @author: jhastings """ import os import errno import socket import atexit import shutil import tarfile from StringIO import StringIO import Crypto.Random from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol from thrift.server import TServer from ezbake.thrift.transport.EzSSLSocket import TSSLServerSocket from ezbake.configuration.EzConfiguration import EzConfiguration from ezbake.configuration.loaders.DirectoryConfigurationLoader import DirectoryConfigurationLoader from ezbake.configuration.helpers import ZookeeperConfiguration from ezbake.configuration.helpers import ApplicationConfiguration from ezbake.configuration.helpers import SystemConfiguration from ezbake.discovery import ServiceDiscoveryClient from kazoo.handlers.threading import TimeoutError from ezpersist.base import MemoryPersist from ezpersist.file import FilePersist import ezbakeca.ca from ezbakeca.cert import Cert from ezbakeca.ca import EzbakeCA from ezbake.ezca import EzCA import ezbake.ezca.ttypes import ezbake.ezca.constants import logging import logging.handlers logger = logging.getLogger(__name__) integer_123 = 1 class EzCAHandler: TABLE_NAME = "_EZ_CA_" SERVER_CERT_NAME = "EzCAService" PERSIST_MODE = "ezca.persist.mode" CLIENT_CERTS = "ezca.autogen.clients" CLIENT_CERT_O = "ezca.autogen.clients.out" CLIENT_CERT_O_DEF = "gen" def __init__(self, ca_name, ezconfig=EzConfiguration().getProperties()): mode = ezconfig.get(EzCAHandler.PERSIST_MODE, "file") if mode == "file": store = FilePersist(EzCAHandler.TABLE_NAME) elif mode == "accumulo": raise NotImplementedError("accumulo persistance not supported by EzCA yet") else: store = MemoryPersist() EzbakeCA.setup(store=store) Cert.setup(store=store) self.store = store try: logger.info("Reading CA certificate {}".format(ca_name)) self.ca = EzbakeCA.get_named(ca_name) except KeyError: self.ca = EzbakeCA(name=ca_name) self.ca.save() def _server_certs(self): """returns a dict of {ca_certs, certs, key} and their values""" ca_certs = ezbakeca.ca.pem_cert(self.ca.ca_cert) cert = Cert.get_named(self.SERVER_CERT_NAME) if not cert.cert: cert.cert = self.ca.sign_csr(cert.csr()) cert.save() key = ezbakeca.ca.pem_key(cert.private_key) cert = ezbakeca.ca.pem_cert(cert.cert) return {'ca_certs': ca_certs, 'cert': cert, 'key': key} def ping(self): return True def csr(self, token, csr): csr = ezbakeca.ca.load_csr(csr) # but since this is a Protect component, and we are only allowing # access to this service by trusted cert CNs, I can accept that. # still, we must log what's happening. logger.info("CSR signing request for Subject: {}. 
Token security ID: {}, " "target security ID: {}, userInfo: {}".format(csr.get_subject(), token.validity.issuedTo, token.validity.issuedFor, token.tokenPrincipal.principal)) # sign the csr and get the cert cert = self.ca.sign_csr(csr) # return the cert as pem return ezbakeca.ca.pem_cert(cert) def ca_server(ezconfig, service_name=None, ca_name="ezbakeca", zoo_host=None, host=None, port=None, verify_pattern=None, ssldir=None): Crypto.Random.atfork() # make sure zookeeper is available for registering with service discovery if zoo_host is None: zooConf = ZookeeperConfiguration(ezconfig) zoo_host = zooConf.getZookeeperConnectionString() if not zoo_host: raise RuntimeError("Zookeeper connection string must be specified " "in EzConfiguration") # make sure the ssl certificate directory is available if not ssldir: ac = ApplicationConfiguration(ezconfig) ssldir = ac.getCertificatesDir() if not ssldir: raise RuntimeError("Certificates Directory \"{0}\" must be set in" " EzConfiguration!".format( ApplicationConfiguration.CERTIFICATES_DIRECTORY_KEY)) # get a free port to bind to (and figure out our hostname) if not port: port = get_port(range(31005,34999)) if not host: host = socket.gethostname() # register with ezdiscovery ezdiscovery = ServiceDiscoveryClient(zoo_host) try: if service_name is None: service_name = ezbake.ezca.constants.SERVICE_NAME logger.info('Registering with service discovery') ezdiscovery.register_common_endpoint(service_name=service_name, host=host, port=port) ezdiscovery.set_security_id_for_common_service(service_name=service_name, security_id="EzCAService") except TimeoutError as e: logger.error("Fatal timeout connecting to zookeeper. Unable to " "register with service discovery.") raise e # create the thrift handler handler = EzCAHandler(ca_name, ezconfig) # generate/get the server SSL certs and write them to disk certs = handler._server_certs() cert_files = [] for k, cert in certs.items(): of = os.path.join(ssldir, k) cert_files.append(of) with os.fdopen(os.open(of, os.O_WRONLY | os.O_CREAT, 0o600), 'w') as ofs: ofs.write(str(cert)) # generate certs for configured clients (read from ezconfig) clients = ezconfig.get(EzCAHandler.CLIENT_CERTS) if clients: gen_client_certs(handler.ca, clients.split(','), ezconfig.get(EzCAHandler.CLIENT_CERT_O)) # start the thrift server processor = EzCA.Processor(handler) transport = TSSLServerSocket(host=host, port=port, verify_pattern=verify_pattern, ca_certs=cert_files[0], cert=cert_files[1], key=cert_files[2]) tfactory = TTransport.TBufferedTransportFactory() pfactory = TBinaryProtocol.TBinaryProtocolFactory() server = TServer.TThreadPoolServer(processor, transport, tfactory, pfactory) logger.info('Starting ezca service on {}:{}'.format(host,port)) server.serve() def ezpersist_instance(mode): if mode == "file": store = FilePersist(EzCAHandler.TABLE_NAME) elif mode == "accumulo": raise NotImplementedError("accumulo persistance not supported by EzCA yet") else: store = MemoryPersist() return store def get_port(rang=None): """Find an open port, optionally in a range""" port = None while port is None: p = 0 if rang: p = rang.pop(0) try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(("",p)) s.listen(1) port = s.getsockname()[1] s.close() break except: port = None continue return port def gen_client_certs(ca, clients, directory=None, force=False): try: for client in filter(None, clients): cert = Cert.get_named(client) if not cert.cert: # client doesn't have a cert yet. 
generate it logger.info("Generating client certificate for {}" .format(client)) cert.cert = ca.sign_csr(cert.csr()) elif force: logger.info("Force regenerating client certificate for {}" .format(client)) cert.cert = ca.sign_csr(cert.csr()) else: # check the client's cert was issued by us, if not, regen casubject = ca.ca_cert.get_issuer() issuer = cert.cert.get_issuer() if casubject != issuer: logger.info("Client certification for {} was issued " \ "by another certificate authority. Re-issuing " \ "the cert".format(client)) cert.cert = ca.sign_csr(cert.csr()) cert.save() if directory: # Now write the archive to disk for easy retrieval tar_certs(ca.cert_string(), cert, directory) except TypeError as e: logger.warn("not generating client certificates, {}".format(e)) def setup_logging(verbose, config): sys_config = SystemConfiguration(config) log = logging.getLogger(__package__) log.setLevel(logging.INFO) hostname = socket.gethostname() formatter = logging.Formatter(hostname+' %(asctime)s [%(threadName)s] %(levelname)-5s %(name)s - %(message)s') if verbose or sys_config.shouldLogToStdOut(): sh = logging.StreamHandler() sh.setLevel(logging.INFO) sh.setFormatter(formatter) log.addHandler(sh) # Create the log directory log_dir = os.path.join(sys_config.getLogDirectory(),'ezca') try: os.mkdir(log_dir) except OSError as e: if e.errno != errno.EEXIST: logger.error("Unable to create the log directory {}".format( log_dir)) raise e wfh = logging.handlers.WatchedFileHandler(os.path.join(log_dir, 'ezca.log')) wfh.setLevel(logging.INFO) wfh.setFormatter(formatter) log.addHandler(wfh) def tar_certs(ca_cert, cert, directory): """Create a tarfile in the given directory from the given ca and certs""" if not directory or os.path.isfile(directory): logger.warn("Cert output directory {} None or a file".format(directory)) return if not os.path.exists(directory): try: os.mkdir(directory) except OSError as e: if e.errno != errno.EEXIST: logger.warn("Unable to create cert output directory") return cdir = os.path.join(directory, "{}.{}".format(cert.name, "tar.gz")) with os.fdopen(os.open(cdir, os.O_WRONLY|os.O_CREAT, 0o600), 'w') as ofs: with tarfile.open(fileobj=ofs, mode="w:gz") as tar: ca_info = tarfile.TarInfo("ezbakeca.crt") ca_info.size = len(ca_cert) tar.addfile(tarinfo=ca_info, fileobj=StringIO(ca_cert)) app_cert = cert.cert_string() cert_info = tarfile.TarInfo("application.crt") cert_info.size = len(app_cert) tar.addfile(tarinfo=cert_info, fileobj=StringIO(app_cert)) pk = cert.pkey_string() pk_info = tarfile.TarInfo("application.priv") pk_info.size = len(pk) tar.addfile(tarinfo=pk_info, fileobj=StringIO(pk)) def create_dirs(ssldir): logger.info("Creating the ssl direcotry on disk (required by openssl)") try: os.mkdir(ssldir) except OSError as e: if e.errno != errno.EEXIST: raise e logger.info("Creating the output directory for client certs") def delete_ssldir(ssldir): """Try to delete the ssl certs from the filesystem""" logger.info("Deleting ssl certs from disk") try: shutil.rmtree(ssldir) except Exception as e: logger.warn("Unable to remove the ssldir from disk {}".format(e)) def load_configuration(dir=None): loaders = [DirectoryConfigurationLoader()] if dir: loaders.append(DirectoryConfigurationLoader(dir)) return EzConfiguration(*loaders).getProperties() def main(config): # Load the EzConfiguration if config.has_key('ezconfig'): ezConfig = config['ezconfig'] else: ezConfig = load_configuration("config") # Configure logging setup_logging(config['verbose'], ezConfig) if config.has_key('clients'): 
        ezConfig[EzCAHandler.CLIENT_CERTS] = config['clients']

    # Setting up the SSL and output Directory
    ssldir = config['ssl_dir']
    create_dirs(ssldir)

    # register shutdown hook
    atexit.register(delete_ssldir, ssldir)

    # ca_server() registers with service discovery and then blocks inside
    # server.serve(); it does not return a server object, so there is nothing
    # to .start() afterwards.
    ca_server(ezConfig, config['service_name'], host=config['host'],
              ca_name=config['ca_name'], ssldir=ssldir,
              verify_pattern=config['verify_pattern'])


def init(config):
    ezConfig = load_configuration("config")
    setup_logging(config.verbose, ezConfig)
    clients = config.clients.split(',')

    # initialize the daos
    store = ezpersist_instance("file")
    EzbakeCA.setup(store=store)
    Cert.setup(store=store)

    if config.force:
        store.delete(config.name)

    try:
        # Try to get it first, to see if it already exists
        ca = EzbakeCA.get_named(config.name)
        logger.info("CA %s not regenerated because it already exists",
                    config.name)
    except KeyError:
        # Create the CA
        ca = EzbakeCA(name=config.name, environment=config.env)
        ca.save()

    gen_client_certs(ca, clients, directory=config.outdir, force=config.force)
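# --- Standalone sketch of the port-probing idea behind get_port() above ---
# get_port() walks a candidate range and binds to each port until one succeeds.
# Binding to port 0, shown below, is the simpler variant where the kernel picks
# any unused port. Illustration only; 'socket' is already imported at the top
# of this module.
def example_find_free_port():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("", 0))                      # port 0 -> OS picks an unused port
    port = s.getsockname()[1]
    s.close()
    return port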
# Copyright 2022 The DDSP Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilites for postprocessing datasets.""" from ddsp import spectral_ops from ddsp.core import hz_to_midi import numpy as np from scipy import stats import tensorflow.compat.v2 as tf def detect_notes(loudness_db, f0_confidence, note_threshold=1.0, exponent=2.0, smoothing=40, f0_confidence_threshold=0.7, min_db=-spectral_ops.DB_RANGE): """Detect note on-off using loudness and smoothed f0_confidence.""" mean_db = np.mean(loudness_db) db = smooth(f0_confidence**exponent, smoothing) * (loudness_db - min_db) db_threshold = (mean_db - min_db) * f0_confidence_threshold**exponent note_on_ratio = db / db_threshold mask_on = note_on_ratio >= note_threshold return mask_on, note_on_ratio def fit_quantile_transform(loudness_db, mask_on, inv_quantile=None): """Fits quantile normalization, given a note_on mask. Optionally, performs the inverse transformation given a pre-fitted transform. Args: loudness_db: Decibels, shape [batch, time] mask_on: A binary mask for when a note is present, shape [batch, time]. inv_quantile: Optional pretrained QuantileTransformer to perform the inverse transformation. Returns: Trained quantile transform. Also returns the renormalized loudnesses if inv_quantile is provided. """ quantile_transform = QuantileTransformer() loudness_flat = np.ravel(loudness_db[mask_on])[:, np.newaxis] loudness_flat_q = quantile_transform.fit_transform(loudness_flat) if inv_quantile is None: return quantile_transform else: loudness_flat_norm = inv_quantile.inverse_transform(loudness_flat_q) loudness_norm = np.ravel(loudness_db.copy())[:, np.newaxis] loudness_norm[mask_on] = loudness_flat_norm return quantile_transform, loudness_norm class QuantileTransformer: """Transform features using quantiles information. Stripped down version of sklearn.preprocessing.QuantileTransformer. https://github.com/scikit-learn/scikit-learn/blob/ 863e58fcd5ce960b4af60362b44d4f33f08c0f97/sklearn/preprocessing/_data.py Putting directly in ddsp library to avoid dependency on sklearn that breaks when pickling and unpickling from different versions of sklearn. """ def __init__(self, n_quantiles=1000, output_distribution='uniform', subsample=int(1e5)): """Constructor. Args: n_quantiles: int, default=1000 or n_samples Number of quantiles to be computed. It corresponds to the number of landmarks used to discretize the cumulative distribution function. If n_quantiles is larger than the number of samples, n_quantiles is set to the number of samples as a larger number of quantiles does not give a better approximation of the cumulative distribution function estimator. output_distribution: {'uniform', 'normal'}, default='uniform' Marginal distribution for the transformed data. The choices are 'uniform' (default) or 'normal'. subsample: int, default=1e5 Maximum number of samples used to estimate the quantiles for computational efficiency. Note that the subsampling procedure may differ for value-identical sparse and dense matrices. 
""" self.n_quantiles = n_quantiles self.output_distribution = output_distribution self.subsample = subsample self.random_state = np.random.mtrand._rand def _dense_fit(self, x, random_state): """Compute percentiles for dense matrices. Args: x: ndarray of shape (n_samples, n_features) The data used to scale along the features axis. random_state: Numpy random number generator. """ n_samples, _ = x.shape references = self.references_ * 100 self.quantiles_ = [] for col in x.T: if self.subsample < n_samples: subsample_idx = random_state.choice( n_samples, size=self.subsample, replace=False) col = col.take(subsample_idx, mode='clip') self.quantiles_.append(np.nanpercentile(col, references)) self.quantiles_ = np.transpose(self.quantiles_) # Due to floating-point precision error in `np.nanpercentile`, # make sure that quantiles are monotonically increasing. # Upstream issue in numpy: # https://github.com/numpy/numpy/issues/14685 self.quantiles_ = np.maximum.accumulate(self.quantiles_) def fit(self, x): """Compute the quantiles used for transforming. Parameters ---------- Args: x: {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the features axis. If a sparse matrix is provided, it will be converted into a sparse ``csc_matrix``. Additionally, the sparse matrix needs to be nonnegative if `ignore_implicit_zeros` is False. Returns: self: object Fitted transformer. """ if self.n_quantiles <= 0: raise ValueError("Invalid value for 'n_quantiles': %d. " 'The number of quantiles must be at least one.' % self.n_quantiles) n_samples = x.shape[0] self.n_quantiles_ = max(1, min(self.n_quantiles, n_samples)) # Create the quantiles of reference self.references_ = np.linspace(0, 1, self.n_quantiles_, endpoint=True) self._dense_fit(x, self.random_state) return self def _transform_col(self, x_col, quantiles, inverse): """Private function to transform a single feature.""" output_distribution = self.output_distribution bounds_threshold = 1e-7 if not inverse: lower_bound_x = quantiles[0] upper_bound_x = quantiles[-1] lower_bound_y = 0 upper_bound_y = 1 else: lower_bound_x = 0 upper_bound_x = 1 lower_bound_y = quantiles[0] upper_bound_y = quantiles[-1] # for inverse transform, match a uniform distribution with np.errstate(invalid='ignore'): # hide NaN comparison warnings if output_distribution == 'normal': x_col = stats.norm.cdf(x_col) # else output distribution is already a uniform distribution # find index for lower and higher bounds with np.errstate(invalid='ignore'): # hide NaN comparison warnings if output_distribution == 'normal': lower_bounds_idx = (x_col - bounds_threshold < lower_bound_x) upper_bounds_idx = (x_col + bounds_threshold > upper_bound_x) if output_distribution == 'uniform': lower_bounds_idx = (x_col == lower_bound_x) upper_bounds_idx = (x_col == upper_bound_x) isfinite_mask = ~np.isnan(x_col) x_col_finite = x_col[isfinite_mask] if not inverse: # Interpolate in one direction and in the other and take the # mean. This is in case of repeated values in the features # and hence repeated quantiles # # If we don't do this, only one extreme of the duplicated is # used (the upper when we do ascending, and the # lower for descending). 
We take the mean of these two x_col[isfinite_mask] = .5 * ( np.interp(x_col_finite, quantiles, self.references_) - np.interp(-x_col_finite, -quantiles[::-1], -self.references_[::-1])) else: x_col[isfinite_mask] = np.interp(x_col_finite, self.references_, quantiles) x_col[upper_bounds_idx] = upper_bound_y x_col[lower_bounds_idx] = lower_bound_y # for forward transform, match the output distribution if not inverse: with np.errstate(invalid='ignore'): # hide NaN comparison warnings if output_distribution == 'normal': x_col = stats.norm.ppf(x_col) # find the value to clip the data to avoid mapping to # infinity. Clip such that the inverse transform will be # consistent clip_min = stats.norm.ppf(bounds_threshold - np.spacing(1)) clip_max = stats.norm.ppf(1 - (bounds_threshold - np.spacing(1))) x_col = np.clip(x_col, clip_min, clip_max) # else output distribution is uniform and the ppf is the # identity function so we let x_col unchanged return x_col def _transform(self, x, inverse=False): """Forward and inverse transform. Args: x : ndarray of shape (n_samples, n_features) The data used to scale along the features axis. inverse : bool, default=False If False, apply forward transform. If True, apply inverse transform. Returns: x : ndarray of shape (n_samples, n_features) Projected data """ x = np.array(x) # Explicit copy. for feature_idx in range(x.shape[1]): x[:, feature_idx] = self._transform_col( x[:, feature_idx], self.quantiles_[:, feature_idx], inverse) return x def transform(self, x): """Feature-wise transformation of the data.""" return self._transform(x, inverse=False) def inverse_transform(self, x): """Back-projection to the original space.""" return self._transform(x, inverse=True) def fit_transform(self, x): """Fit and transform.""" return self.fit(x).transform(x) def compute_dataset_statistics(data_provider, batch_size=1, power_frame_size=1024, power_frame_rate=50): """Calculate dataset stats. Args: data_provider: A DataProvider from ddsp.training.data. batch_size: Iterate over dataset with this batch size. power_frame_size: Calculate power features on the fly with this frame size. power_frame_rate: Calculate power features on the fly with this frame rate. Returns: Dictionary of dataset statistics. This is an overcomplete set of statistics, as there are now several different tone transfer implementations (js, colab, vst) that need different statistics for normalization. """ print('Calculating dataset statistics for', data_provider) data_iter = iter(data_provider.get_batch(batch_size, repeats=1)) # Unpack dataset. i = 0 loudness = [] power = [] f0 = [] f0_conf = [] audio = [] for batch in data_iter: loudness.append(batch['loudness_db']) power.append( spectral_ops.compute_power(batch['audio'], frame_size=power_frame_size, frame_rate=power_frame_rate)) f0.append(batch['f0_hz']) f0_conf.append(batch['f0_confidence']) audio.append(batch['audio']) i += 1 print(f'Computing statistics for {i * batch_size} examples.') loudness = np.vstack(loudness) power = np.vstack(power) f0 = np.vstack(f0) f0_conf = np.vstack(f0_conf) audio = np.vstack(audio) # Fit the transform. trim_end = 20 f0_trimmed = f0[:, :-trim_end] pitch_trimmed = hz_to_midi(f0_trimmed) power_trimmed = power[:, :-trim_end] loudness_trimmed = loudness[:, :-trim_end] f0_conf_trimmed = f0_conf[:, :-trim_end] # Detect notes. mask_on, _ = detect_notes(loudness_trimmed, f0_conf_trimmed) quantile_transform = fit_quantile_transform(loudness_trimmed, mask_on) # Pitch statistics. 
  def get_stats(x, prefix='x', note_mask=None):
    if note_mask is None:
      mean_max = np.mean(np.max(x, axis=-1))
      mean_min = np.mean(np.min(x, axis=-1))
    else:
      max_list = []
      for x_i, m in zip(x, note_mask):
        if np.sum(m) > 0:
          max_list.append(np.max(x_i[m]))
      mean_max = np.mean(max_list)

      min_list = []
      for x_i, m in zip(x, note_mask):
        if np.sum(m) > 0:
          min_list.append(np.min(x_i[m]))
      mean_min = np.mean(min_list)

      x = x[note_mask]

    return {
        f'mean_{prefix}': np.mean(x),
        f'max_{prefix}': np.max(x),
        f'min_{prefix}': np.min(x),
        f'mean_max_{prefix}': mean_max,
        f'mean_min_{prefix}': mean_min,
        f'std_{prefix}': np.std(x)
    }

  ds_stats = {}
  ds_stats.update(get_stats(pitch_trimmed, 'pitch'))
  ds_stats.update(get_stats(power_trimmed, 'power'))
  ds_stats.update(get_stats(loudness_trimmed, 'loudness'))
  ds_stats.update(get_stats(pitch_trimmed, 'pitch_note', mask_on))
  ds_stats.update(get_stats(power_trimmed, 'power_note', mask_on))
  ds_stats.update(get_stats(loudness_trimmed, 'loudness_note', mask_on))
  ds_stats['quantile_transform'] = quantile_transform
  return ds_stats


# ------------------------------------------------------------------------------
# Loudness Normalization
# ------------------------------------------------------------------------------
def smooth(x, filter_size=3):
  """Smooth 1-d signal with a box filter."""
  x = tf.convert_to_tensor(x, tf.float32)
  is_2d = len(x.shape) == 2
  x = x[:, :, tf.newaxis] if is_2d else x[tf.newaxis, :, tf.newaxis]
  w = tf.ones([filter_size])[:, tf.newaxis, tf.newaxis] / float(filter_size)
  y = tf.nn.conv1d(x, w, stride=1, padding='SAME')
  y = y[:, :, 0] if is_2d else y[0, :, 0]
  return y.numpy()
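# --- Minimal usage sketch of the QuantileTransformer defined above ---
# This mirrors, in miniature, what fit_quantile_transform() does with the
# note-on loudness values: fit on the data, map it to [0, 1], then map back.
# The synthetic "loudness" values below are made up for illustration.
def _example_quantile_roundtrip():
  rng = np.random.RandomState(0)
  fake_loudness = rng.normal(loc=-40.0, scale=10.0, size=(1000, 1))  # fake dB
  qt = QuantileTransformer(n_quantiles=100)
  mapped = qt.fit_transform(fake_loudness)        # values mapped into [0, 1]
  recovered = qt.inverse_transform(mapped)        # back to ~original dB
  print('max round-trip error:', np.max(np.abs(recovered - fake_loudness)))
  return qt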
""" This modules handles HTTP RPC requests, by exposing them via proxies. """ import requests from io import BytesIO class ProxyError(Exception): """ Raised when the HTTP server returns an error code. """ class Proxy: """ A proxy is a wrapper around an HTTP call to an specific path. When you have a ``proxy`` for an path e.g. ``/api/v0/`` you can get the proxy for ``/api/v0/block`` either by accessing an attribute ``proxy.block`` or by accessing an item ``proxy["block"]``. """ def __init__(self, rootProxy, path): self.rootProxy = rootProxy self.path = path def _get(self, name): """ Return a proxy for a child with the given name :param name: Name of child :return: Proxy for child """ return Proxy(self.rootProxy, "{}/{}".format(self.path, name)) def __getattr__(self, name): return self._get(name) def __getitem__(self, name): return self._get(name) def __call__(self, *args, _in = None, **opts): """ Call the HTTP RPC method referenced by this proxy. This is done by passing the path, arguments, options and input stream to an internal method of the root proxy. """ return self.rootProxy._call_endpoint(self.path, args, opts, _in) def __repr__(self): return "Proxy({!r})".format(self.path) def with_inputenc(self, inputenc): """ Return a proxy with the same path, but with the input wrapped with the specified encoding. :param inputenc: The input encoding that will be applied to any input before sending the HTTP request. :return: A new proxy with the same path but, but with the specified input encoding. """ return InputEncodingProxy(self, inputenc) def with_outputenc(self, outputenc): """ Return a proxy with the same path, but with the output wrapped with the specified encoding. :param outputenc: The output encoding that will be applied to any output before sending the HTTP request. :return: A new proxy with the same path but, but with the specified output encoding. """ return OutputEncodingProxy(self, outputenc) class InputEncodingProxy(Proxy): """ A proxy that handles input encoding. """ def __init__(self, parent, inputenc): Proxy.__init__(self, parent.rootProxy, parent.path) self.parent = parent self.inputenc = inputenc def __call__(self, *args, _in = None, **opts): if (_in): # TODO: jgraef: Replace this with something better # # todo: mec-is: I will if you first will let me know what this # callable is supposed to be doing by writing a decent docstring... (; # # NOTE: inputenc.dump expects a writable stream for output, but # calling a proxy with input expects a readable stream for input. # Is there a stream type, which is readable on one end and writable # on the other, but doesn't buffer all the data? buf = BytesIO() _in = self.inputenc.dump(_in, buf) buf.seek(0) _in = buf opts["inputenc"] = self.inputenc.name return self.parent(*args, _in = _in, **opts) class OutputEncodingProxy(Proxy): """ A proxy that handles input encoding. """ def __init__(self, parent, outputenc): Proxy.__init__(self, parent.rootProxy, parent.path) self.parent = parent self.outputenc = outputenc def __call__(self, *args, _in = None, **opts): opts["encoding"] = self.outputenc.name out = self.parent(*args, _in = _in, **opts) return self.outputenc.load(out) class HttpProxy: """ The root proxy which offers the root attribute from which all proxies are derived. This class also actually does the work of doing an HTTP request. """ ENDPOINT = "/api/v0" """ The api endpoint we're using. Currently API v0. """ DEBUG = False """ Whether to output debugging information for HTTP requests. 
""" def __init__(self, host, port): """ Create an instance of a HTTPProxy. All method calls will be executed through HTTP requests. """ self.base_url = "http://{}:{:d}{}".format(host, port, self.ENDPOINT) self.root = Proxy(self, "") self.session = requests.Session() def _call_endpoint(self, path, args, opts, f_in): """ Actually perform an HTTP request thus calling an HTTP RPC function :param path: The path to the RPC function relative to the API endpoint :param args: Iterable of arguments that will be passed as arguments to the RPC function. This is like passing a normal argument to an ipfs shell command. :param opts: A dictionary of options for that RPC call. This is like passing a long option (e.g. --recursive true) to an ipfs shell command. :param f_in: Optional input stream. This is like piping a file into an ipfs shell command. :return: A readable file-like object with the return data of the RPC call, i.e. the body of the HTTP response. :raise: Raises a :py:exc:ProxyError if the server responds with an error code """ params = [] for arg in args: if (arg != None): params.append(("arg", arg)) for opt_key, opt_val in opts.items(): if (opt_val != None): params.append((opt_key, str(opt_val))) url = self.base_url + path # TODO: Support multiple input files if (f_in): input_files = [("data", ("data", f_in, "application/octet-stream"))] method = "POST" else: input_files = None method = "GET" if (self.DEBUG): print() print("Request: {} {}".format(method, "(has body)" if (f_in) else "")) print("URL: {}".format(url)) if (params): print("Parameters:") for k, v in params: print(" {}: {}".format(k, v)) print() resp = self.session.request(method, url, params = params, files = input_files, stream = True) if (resp.status_code != 200): raise ProxyError(resp.text) if (self.DEBUG): print("Response Headers:") for k, v in resp.headers.items(): print("{}: {}".format(k, v)) print() return resp.raw __all__ = [ "ProxyError", "Proxy", "InputEncodingProxy", "OutputEncodingProxy", "HttpProxy" ]
# -*- coding: utf-8 -*- """Processes GCP cloud disks using Turbinia.""" import getpass import os import tempfile # We import a class to avoid importing the whole turbinia module. from turbinia import TurbiniaException from turbinia import client as turbinia_client from turbinia import config as turbinia_config from turbinia import evidence, output_manager from turbinia.message import TurbiniaRequest from dftimewolf.lib import module from dftimewolf.lib.containers import containers from dftimewolf.lib.modules import manager as modules_manager # pylint: disable=no-member class TurbiniaProcessor(module.BaseModule): """Processes Google Cloud (GCP) disks with Turbinia. Attributes: client (TurbiniaClient): Turbinia client. disk_name (str): name of the disk to process. instance (str): name of the Turbinia instance project (str): name of the GPC project containing the disk to process. turbinia_region (str): GCP region in which the Turbinia server is running. turbinia_zone (str): GCP zone in which the Turbinia server is running. """ def __init__(self, state, critical=False): """Initializes a Turbinia Google Cloud (GCP) disks processor. Args: state (DFTimewolfState): recipe state. critical (Optional[bool]): True if the module is critical, which causes the entire recipe to fail if the module encounters an error. """ super(TurbiniaProcessor, self).__init__(state, critical=critical) self._output_path = None self.client = None self.disk_name = None self.instance = None self.project = None self.turbinia_region = None self.turbinia_zone = None self.sketch_id = None self.run_all_jobs = None # pylint: disable=arguments-differ def SetUp(self, disk_name, project, turbinia_zone, sketch_id, run_all_jobs): """Sets up the object attributes. Args: disk_name (str): name of the disk to process. project (str): name of the GPC project containing the disk to process. turbinia_zone (str): GCP zone in which the Turbinia server is running. sketch_id (int): The Timesketch sketch id run_all_jobs (bool): Whether to run all jobs instead of a faster subset. """ # TODO: Consider the case when multiple disks are provided by the previous # module or by the CLI. if project is None or turbinia_zone is None: self.state.AddError( 'project or turbinia_zone are not all specified, bailing out', critical=True) return self.disk_name = disk_name self.project = project self.turbinia_zone = turbinia_zone self.sketch_id = sketch_id self.run_all_jobs = run_all_jobs try: turbinia_config.LoadConfig() self.turbinia_region = turbinia_config.TURBINIA_REGION self.instance = turbinia_config.PUBSUB_TOPIC if turbinia_config.TURBINIA_PROJECT != self.project: self.state.AddError( 'Specified project {0!s} does not match Turbinia configured ' 'project {1!s}. Use gcp_turbinia_import recipe to copy the disk ' 'into the same project.'.format( self.project, turbinia_config.TURBINIA_PROJECT), critical=True) return self._output_path = tempfile.mkdtemp() self.client = turbinia_client.TurbiniaClient() except TurbiniaException as exception: # TODO: determine if exception should be converted into a string as # elsewhere in the codebase. self.state.AddError(exception, critical=True) return def _DeterminePaths(self, task_data): """Builds lists of local and remote paths from data retured by Turbinia. This finds all .plaso, hashes.json, and BinaryExtractorTask files in the Turbinia output, and determines if they are local or remote (it's possible this will be running against a local instance of Turbinia). 
Args: task_data (list[dict]): List of dictionaries representing Turbinia task data. Returns: tuple[list, list]: A tuple of two lists. The first element contains the local paths, the second element contains the remote (GS) paths. """ local_paths = [] gs_paths = [] for task in task_data: # saved_paths may be set to None saved_paths = task.get('saved_paths') or [] for path in saved_paths: if path.endswith('.plaso') or \ path.endswith('BinaryExtractorTask.tar.gz') or \ path.endswith('hashes.json'): if path.startswith('gs://'): gs_paths.append(path) else: local_paths.append(path) return local_paths, gs_paths def _DownloadFilesFromGCS(self, timeline_label, gs_paths): """Downloads files stored in Google Cloud Storage to the local filesystem. Args: timeline_label (str): Label to use to construct the path list. gs_paths (str): gs:// URI to files that need to be downloaded from GS. Returns: list(str): A list of local paths were GS files have been copied to. """ # TODO: Externalize fetching files from GCS buckets to a different module. local_paths = [] for path in gs_paths: local_path = None try: output_writer = output_manager.GCSOutputWriter( path, local_output_dir=self._output_path) local_path = output_writer.copy_from(path) except TurbiniaException as exception: # Don't add a critical error for now, until we start raising errors # instead of returning manually each self.state.AddError(exception, critical=False) if local_path: local_paths.append((timeline_label, local_path)) return local_paths def Process(self): """Process files with Turbinia.""" log_file_path = os.path.join(self._output_path, 'turbinia.log') print('Turbinia log file: {0:s}'.format(log_file_path)) vm_containers = self.state.GetContainers(containers.ForensicsVM) if vm_containers and not self.disk_name: forensics_vm = vm_containers[0] self.disk_name = forensics_vm.evidence_disk.name print('Using disk {0:s} from previous collector'.format(self.disk_name)) evidence_ = evidence.GoogleCloudDisk( disk_name=self.disk_name, project=self.project, zone=self.turbinia_zone) try: evidence_.validate() except TurbiniaException as exception: self.state.AddError(exception, critical=True) return request = TurbiniaRequest(requester=getpass.getuser()) request.evidence.append(evidence_) if self.sketch_id: request.recipe['sketch_id'] = self.sketch_id if not self.run_all_jobs: # TODO(aarontp): Remove once the release with # https://github.com/google/turbinia/pull/554 is live. request.recipe['jobs_blacklist'] = [ 'StringsJob', 'BinaryExtractorJob', 'BulkExtractorJob', 'PhotorecJob'] request.recipe['jobs_denylist'] = [ 'StringsJob', 'BinaryExtractorJob', 'BulkExtractorJob', 'PhotorecJob'] # Get threat intelligence data from any modules that have stored some. # In this case, observables is a list of containers.ThreatIntelligence # objects. 
threatintel = self.state.GetContainers(containers.ThreatIntelligence) if threatintel: print('Sending {0:d} threatintel to Turbinia GrepWorkers...'.format( len(threatintel))) indicators = [item.indicator for item in threatintel] request.recipe['filter_patterns'] = indicators request_dict = { 'instance': self.instance, 'project': self.project, 'region': self.turbinia_region, 'request_id': request.request_id } try: print('Creating Turbinia request {0:s} with Evidence {1!s}'.format( request.request_id, evidence_.name)) self.client.send_request(request) print('Waiting for Turbinia request {0:s} to complete'.format( request.request_id)) self.client.wait_for_request(**request_dict) task_data = self.client.get_task_data(**request_dict) except TurbiniaException as exception: # TODO: determine if exception should be converted into a string as # elsewhere in the codebase. self.state.AddError(exception, critical=True) return message = self.client.format_task_status(**request_dict, full_report=True) short_message = self.client.format_task_status(**request_dict) print(short_message) # Store the message for consumption by any reporting modules. report = containers.Report( module_name='TurbiniaProcessor', text=message, text_format='markdown') self.state.StoreContainer(report) local_paths, gs_paths = self._DeterminePaths(task_data) if not local_paths and not gs_paths: self.state.AddError( 'No interesting files found in Turbinia output.', critical=True) return timeline_label = '{0:s}-{1:s}'.format(self.project, self.disk_name) # Any local files that exist we can add immediately to the output all_local_paths = [ (timeline_label, p) for p in local_paths if os.path.exists(p)] downloaded_gs_paths = self._DownloadFilesFromGCS(timeline_label, gs_paths) all_local_paths.extend(downloaded_gs_paths) if not all_local_paths: self.state.AddError('No interesting files could be found.', critical=True) for description, path in all_local_paths: if path.endswith('BinaryExtractorTask.tar.gz'): container = containers.ThreatIntelligence( name='BinaryExtractorResults', indicator=None, path=path) if path.endswith('hashes.json'): container = containers.ThreatIntelligence( name='ImageExportHashes', indicator=None, path=path) if path.endswith('.plaso'): container = containers.File(name=description, path=path) self.state.StoreContainer(container) modules_manager.ModulesManager.RegisterModule(TurbiniaProcessor)
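# --- Standalone sketch of the filtering rule in _DeterminePaths() above ---
# Turbinia task output is reduced to the files this module cares about
# (.plaso timelines, hashes.json, BinaryExtractorTask archives) and split into
# local vs. gs:// paths. The sample task_data below is invented for
# illustration only.
def _split_paths_example():
  task_data = [
      {'saved_paths': ['gs://bucket/run1/psort.plaso',
                       '/tmp/turbinia/run1/hashes.json',
                       '/tmp/turbinia/run1/worker.log']},
      {'saved_paths': None},  # saved_paths may be missing or None
  ]
  local_paths, gs_paths = [], []
  for task in task_data:
    for path in task.get('saved_paths') or []:
      if path.endswith(('.plaso', 'BinaryExtractorTask.tar.gz', 'hashes.json')):
        (gs_paths if path.startswith('gs://') else local_paths).append(path)
  return local_paths, gs_paths

# _split_paths_example() -> (['/tmp/turbinia/run1/hashes.json'],
#                            ['gs://bucket/run1/psort.plaso'])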
from __future__ import print_function from __future__ import division import os import time import numpy as np import numpy.ma as ma from scipy.misc import imsave from mpl_toolkits.axes_grid1 import make_axes_locatable import matplotlib # Force matplotlib to not use any Xwindows backend. # See- https://stackoverflow.com/questions/4931376/generating-matplotlib-graphs-without-a-running-x-server matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.cm as cm import cv2 from sklearn.preprocessing import StandardScaler import consts import utils USE_OPENCV = False def imshow(win_title, img, use_opencv=None): if use_opencv is None: use_opencv = USE_OPENCV img_range = (np.min(img), np.max(img)) if img.dtype in [np.float, np.float32, np.float64] and (img_range[0] < 0 or img_range[1] > 1): print("Floating image not in [0, 1]. Converting to [0, 1]...") img = utils.min_max_scale(img, img_range, (0, 1)) if use_opencv: cv2.imshow(win_title, img) key = cv2.waitKey(0) & 0xFF return key else: # rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB if len(img.shape) == 3 and img.shape[2] == 3: rgb = np.zeros_like(img) rgb[:, :, 0] = img[:, :, 2] rgb[:, :, 1] = img[:, :, 1] rgb[:, :, 2] = img[:, :, 0] else: rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) plt.title(win_title) plt.imshow(rgb) plt.show() return 27 # Esc - Will show only 1st window def multiple_plots(figure_num, nrows, ncols, plot_number): plt.figure(figure_num) plt.subplot(nrows, ncols, plot_number) def plot_hist(x, normed, bins, title='Histogram', ylabel=None, show=True, save_path=None): print ("Plot: " + title) if ylabel is None and normed: ylabel = 'Probability' plt.title(title) plt.hist(x, normed=normed, bins=bins) plt.ylabel(ylabel) if save_path is not None: plt.savefig(save_path) if show: plt.show() print ("Done plot_hist: " + title) def plot_2d_hist(x, y, normed, bins, title='Histogram', xlabel=None, ylabel=None, xlim=None, ylim=None, show=True, save_path=None): print ("Plot: " + title) heatmap, xedges, yedges = np.histogram2d(x, y, normed=normed, bins=bins) extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]] plt.title(title) cax = plt.imshow(heatmap.T, cmap='jet', aspect='auto', extent=extent, origin='lower') plt.colorbar(cax) plt.xlabel(xlabel) plt.ylabel(ylabel) if xlim is not None: plt.xlim(xlim) if ylim is not None: plt.ylim(ylim) if save_path is not None: plt.savefig(save_path) if show: plt.show() print ("Done plot_hist: " + title) def plot_line(x, title='Line', ylabel=None, show=True): print ("Plot: " + title) if ylabel is None: ylabel = 'Value' plt.plot(x) plt.ylabel(ylabel) if show: plt.show() print("Done plot_line: " + title) return plt def render_view(pose): if len(pose) != 4: raise Exception("Pose must be of the form (x,y,yaw,pitch)") view_str = "%f %f %f %f %f %f" % (pose[0], pose[1], 9999, pose[2], pose[3], 0) print("Calling PROJECT with pose %s" % view_str) from subprocess import call file_path = '/home/moti/cg/project/meshNet/lastEdgeView.png' call(['../project', '../../berlin/berlin.obj', '-single=' + file_path, '-output_dir=temp', '-pose=' + view_str]) img = cv2.imread(file_path) # utils.rm_file(file_path) return img class OrthoData: def __init__(self, data_dir): self.filepath = os.path.join(data_dir, 'map_ortho_data.txt') self.left, self.right, self.bottom, self.top, self.neardist, self.fardist = self.read() def read(self): with open(self.filepath) as f: line = f.readline() ortho_data_str = line[line.find('=(') + 2:len(line) - 1] ortho_data_list = [] for num in ortho_data_str.split(','): 
ortho_data_list.append(float(num)) return tuple(ortho_data_list) def convert_world_point_to_map(self, p, map_shape): map_x = (p[0] - self.left) / (self.right - self.left) * map_shape[1] map_y = (p[1] - self.bottom) / (self.top - self.bottom) * map_shape[0] return int(round(map_x)), int(round(map_y)) def get_map_view(data_dir, p1, p2, roi=None): map_file_path = os.path.join(data_dir, 'gSamplesMap.png') img_map = cv2.imread(map_file_path) orth_data = OrthoData(data_dir) map_p1 = orth_data.convert_world_point_to_map(p1, img_map.shape) map_p2 = orth_data.convert_world_point_to_map(p2, img_map.shape) # cv_azul_color = (255, 255, 0) # cv_green_color = (0, 255, 0) cv_red_color = (0, 0, 255) cv_yellow_color = (45, 205, 243) cv2.circle(img_map, center=map_p1, radius=12, color=cv_yellow_color, thickness=cv2.FILLED) cv2.circle(img_map, center=map_p2, radius=12, color=cv_red_color, thickness=cv2.FILLED) if roi is not None: left, top, width, height = roi img_map = img_map[top:top + height, left:left + width] return img_map # make view_prediction() local: # import cv2 # import matplotlib.pyplot as plt # from visualize import render_view # from visualize import get_map_view # from visualize import multiple_plots def view_prediction(data_dir, roi, loader, y_train_pred, y_test_pred, errors_by, idx, is_train=True, normalized=False, asc=True, figure_num=99): if is_train: y = loader.y_train y_pred = y_train_pred else: y = loader.y_test y_pred = y_test_pred normalized_errors = np.linalg.norm(y - y_pred, axis=-1) if not normalized: y = loader.y_inverse_transform(y) y_pred = loader.y_inverse_transform(y_pred) xy_errors = utils.xy_dist(y[:, :2], y_pred[:, :2]) angle_errors = utils.rotation_error(y[:, 2:], y_pred[:, 2:], normalized) if errors_by == 'xy': errors = xy_errors elif errors_by == 'angle': errors = angle_errors elif errors_by == 'comb': errors = normalized_errors else: raise Exception("Unknown errors_by argument") # Convert 'y' to (x,y,yaw,pitch) as this is the current visualization # Another possibility would be to convert everything to quaternions and calculate the joint-rotation-angle error if y.shape[1] == 4: # 'angle' pass elif y.shape[1] == 6: # 'quaternion' y = utils.convert_quaternion_y_to_yaw_pitch(y) y_pred = utils.convert_quaternion_y_to_yaw_pitch(y_pred) else: raise Exception("Only 'angle' and 'quaternion' are currently supported") sort_idx = np.argsort(errors) if asc else np.argsort(errors)[::-1] y_single = y[sort_idx][idx] y_pred_single = y_pred[sort_idx][idx] if normalized: raise Exception("Normalized is currently not supported") img_org = render_view(y_single) img_pred = render_view(y_pred_single) img_map = get_map_view(data_dir, y_single[:2], y_pred_single[:2], roi) upper_part_height = int(0.33333 * img_org.shape[0]) cv_red_color = (0, 0, 255) cv2.line(img_org, (0, upper_part_height), (img_org.shape[1], upper_part_height), cv_red_color, 3) cv2.line(img_pred, (0, upper_part_height), (img_org.shape[1], upper_part_height), cv_red_color, 3) img_org = cv2.cvtColor(img_org, cv2.COLOR_BGR2RGB) img_pred = cv2.cvtColor(img_pred, cv2.COLOR_BGR2RGB) # The complex line efficient by far (It avoids copying the sorted image array) # x_input = loader.x_train[sort_idx][idx] x_input = loader.x_train[np.arange(len(loader.x_train))[sort_idx][idx]] img_train = cv2.cvtColor(x_input, cv2.COLOR_GRAY2RGB) img_map = cv2.cvtColor(img_map, cv2.COLOR_BGR2RGB) np.set_printoptions(formatter={'float_kind': lambda x: "%.2f" % x}) multiple_plots(figure_num, 2, 2, 1) plt.imshow(img_org, interpolation='bilinear') 
plt.title('Original %s' % y_single) multiple_plots(figure_num, 2, 2, 2) plt.imshow(img_pred, interpolation='bilinear') plt.title('Estimation %s' % y_pred_single) np.set_printoptions() # Reset multiple_plots(figure_num, 2, 2, 3) plt.imshow(img_train) plt.title('NN Input') multiple_plots(figure_num, 2, 2, 4) plt.imshow(img_map) plt.title('Map') angle_diff = utils.angle_diff(y_single[2:], y_pred_single[2:]) plt.suptitle("idx = %i/%i (%s,%s), errors: xy=%s, yaw=%s, pitch=%s, angle_l2=%s, comb=%s" % (idx if asc else len(errors) - 1 - idx, len(errors) - 1, 'asc' if asc else 'desc', errors_by, xy_errors[sort_idx][idx], angle_diff[0], angle_diff[1], angle_errors[sort_idx][idx], normalized_errors[sort_idx][idx])) # , fontsize=16) plt.show() def show_predictions(x, y, prediction, file_urls, scaler, encoder, resize_factor=4, title="show_image"): key = None if scaler is None: scaler = StandardScaler() scaler.mean_ = np.zeros(x.shape[1:], np.float32).flatten() * 128. scaler.scale_ = np.full(x.shape[1:], 255., dtype=np.float32).flatten() orig_shape = x.shape x_flatten = x.reshape(len(x), -1) x_flatten = scaler.inverse_transform(x_flatten) x = x_flatten.reshape(orig_shape) x = x.astype('uint8') i = 0 while key != 27: if key == ord('n') and i < x.shape[0] - 1: i += 1 elif key == ord('p') and i > 0: i -= 1 elif key == ord('b'): resize_factor *= 2 elif key == ord('s'): resize_factor /= 2 elif key == ord('o'): cv2.imshow(title, cv2.imread(file_urls[i])) cv2.waitKey(0) img = utils.get_image_from_batch(x, i) show_img = cv2.resize(img, (0, 0), fx=resize_factor, fy=resize_factor) text = "class {0} ->".format(encoder.inverse_transform(y[i])) cv2.putText(show_img, text, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4 if img.shape[1] > 30 else 0.35, (255, 255, 255), 1, cv2.LINE_AA) text = "predicted {0}, factor {1}".format(encoder.inverse_transform(prediction[i]), resize_factor) cv2.putText(show_img, text, (20, 35), cv2.FONT_HERSHEY_SIMPLEX, 0.4 if img.shape[1] > 30 else 0.35, (255, 255, 255), 1, cv2.LINE_AA) cv2.putText(show_img, file_urls[i], (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.4 if img.shape[1] > 30 else 0.3, (255, 255, 255), 1, cv2.LINE_AA) cv2.imshow(title, show_img) print("Current file: " + file_urls[i] + " 'n'-next, 'p'-prev, 'b'-bigger, 's'-smaller, 'o'-open original, Esc to Exit") key = cv2.waitKey(0) & 255 # For some reason I sometime get a very large number and not the clean ASCII # FIXME: Add border_color instead of bg_color - low priority def show_data(x, offset=0, h_axis_num=None, v_axis_num=None, border_size=1, bg_color=(0, 0, 0), write=None): key = None done = False img_rows = x.shape[1] img_cols = x.shape[2] img_channels = x.shape[3] if v_axis_num is None: v_axis_num = 600 // img_rows if h_axis_num is None: h_axis_num = 800 // img_cols # (images_num_per_axis-1) is added form grid lines images = np.zeros((img_rows * v_axis_num + (v_axis_num - 1) * border_size, img_cols * h_axis_num + (h_axis_num - 1) * border_size, 3), np.uint8) images[:] = bg_color if x.dtype == np.float32: mean_img = np.full(x.shape[1:], np.abs(np.min(x)), np.float32) # Handle e.g. (0, 1) or (-0.5, 0.5) scale_img = np.full(x.shape[1:], 255, np.float32) else: scale_img = np.full(x.shape[1:], 1, np.float32) mean_img = np.zeros(x.shape[1:], np.float32) while key != 27 and not done: # 27 is Esc key for row in range(v_axis_num): for col in range(h_axis_num): cur_idx = offset + row * h_axis_num + col if cur_idx >= x.shape[0]: done = True break cur_img = ((x[cur_idx, ...] 
+ mean_img) * scale_img).astype('uint8') if img_channels == 1: cur_img = cv2.cvtColor(cur_img, cv2.COLOR_GRAY2RGB) images[row * img_rows + row * border_size:row * img_rows + img_rows + row * border_size, col * img_cols + col * border_size:col * img_cols + img_cols + col * border_size, :] = cur_img current_images = str(offset) + "-" + str(offset + v_axis_num * h_axis_num - 1) + "_of_" + str(x.shape[0]) title = "show_data_" + current_images # cv2.namedWindow(title) # cv2.moveWindow(title, 50, 50) key = imshow(title, images) print("Images: " + current_images + ". Press Esc to exit or any other key to continue") # key = cv2.waitKey(0) & 0xFF if write: image_path = os.path.join(write, title + ".png") cv2.imwrite(image_path, images) offset += v_axis_num * h_axis_num images[:] = bg_color # cv2.destroyWindow(title) def visualize_history(history, sess_info, render_to_screen=True): try: print("History results-") print(history.history) print("") for d in history.history: print("%s = %s" % (d, history.history[d])) target_names = ['training-set', 'validation-set'] fig = plt.figure() fig.suptitle(sess_info.title) if "acc" in history.history.keys(): ax = fig.add_subplot(121) else: ax = fig.add_subplot(111) ax.plot(history.epoch, history.history['loss'], 'r', label=target_names[0]) ax.plot(history.epoch, history.history['val_loss'], 'g', label=target_names[1]) ax.legend() ax.set_ylim(ymin=0, ymax=3 * max(history.history['val_loss'])) # Avoid very high values 'loss' might starts with ax.set_title('Loss (train [%.2f, %.2f], val [%.2f, %.2f])' % (min(history.history['loss']), max(history.history['loss']), min(history.history['val_loss']), max(history.history['val_loss']))) if "acc" in history.history.keys(): ax = fig.add_subplot(122) ax.plot(history.epoch, history.history['acc'], 'r', label=target_names[0]) ax.plot(history.epoch, history.history['val_acc'], 'g', label=target_names[1]) ax.legend() ax.set_title('Accuracy') # Save history plot to disk history_plot_fname = sess_info.title + '_history_plot.png' history_plot_full_path = os.path.join(consts.OUTPUT_DIR, sess_info.out_dir, history_plot_fname) plt.savefig(history_plot_full_path) if render_to_screen: plt.show() except Exception as e: print("Warning: {}".format(e)) def tensor_2d_to_image(x): # normalize tensor: center on 0., ensure std is 0.1 x -= x.mean() x /= (x.std() + 1e-5) x *= 0.1 # clip to [0, 1] x += 0.5 x = np.clip(x, 0, 1) # convert to image array x *= 255 x = np.clip(x, 0, 255).astype('uint8') return x def tensor_3d_to_image(x): # normalize tensor: center on 0., ensure std is 0.1 x -= x.mean() x /= (x.std() + 1e-5) x *= 0.1 # clip to [0, 1] x += 0.5 x = np.clip(x, 0, 1) # convert to RGB array x *= 255 x = x.transpose((1, 2, 0)) x = np.clip(x, 0, 255).astype('uint8') return x # Works best if creating model only up to the layer we want to visualize. # Not sure why - something with the derivatives behaves better. 
def visualize_layer_by_maximize_gradients_wrt_input(model, layer_name, input_shape, number_of_filter_to_display_in_each_axis): # get the symbolic outputs of each "key" layer (we gave them unique names) layer_dict = dict([(layer.name, layer) for layer in model.layers]) from keras import backend as K # Input size for the model img_ndim = input_shape[0] img_height = input_shape[1] img_width = input_shape[2] # Input image with which to derive the layer we visualize input_img = model.layers[0].input # Layer to visualize layer_output = layer_dict[layer_name].output nb_filters = layer_output._keras_shape[1] filter_width = layer_output._keras_shape[2] filter_height = layer_output._keras_shape[3] def normalize(x): # utility function to normalize a tensor by its L2 norm return x / (K.sqrt(K.mean(K.square(x))) + 1e-5) kept_filters = [] # for filter_index in range(0, 10): for filter_index in range(0, nb_filters): print('Processing filter %d' % filter_index) start_time = time.time() # we build a loss function that maximizes the activation # of the nth filter of the layer considered loss = K.mean(layer_output[:, filter_index, :, :]) # we compute the gradient of the input picture wrt this loss grads = K.gradients(loss, input_img)[0] # normalization trick: we normalize the gradient grads = normalize(grads) # this function returns the loss and grads given the input picture iterate = K.function([input_img, K.learning_phase()], [loss, grads]) # step size for gradient ascent step = 1. # we start from a gray image with some random noise input_img_data = (np.random.random((1, img_ndim, img_width, img_height)) - 0.5) * 20 # we run gradient ascent for 20 steps for i in range(20): loss_value, grads_value = iterate([input_img_data, 0]) input_img_data += grads_value * step print('Current loss value:', loss_value) # decode the resulting input image if loss_value > 0: img = tensor_3d_to_image(input_img_data[0]) kept_filters.append((img, loss_value)) end_time = time.time() print('Filter %d processed in %ds' % (filter_index, end_time - start_time)) # we will stitch the best 64 filters on a 8 x 8 grid. n = number_of_filter_to_display_in_each_axis # the filters that have the highest loss are assumed to be better-looking. # we will only keep the top 64 filters. 
print(len(kept_filters)) kept_filters.sort(key=lambda x: x[1], reverse=True) kept_filters = kept_filters[:n * n] # build a black picture with enough space for our filters with a 5px margin in between margin = 5 width = n * img_width + (n - 1) * margin height = n * img_height + (n - 1) * margin stitched_filters = np.zeros((width, height, img_ndim)) # fill the picture with our saved filters for i in range(n): for j in range(n): img, loss = kept_filters[i * n + j] stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width, (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img # save the result to disk if not os.path.isdir('outputs/'): os.mkdir('outputs/') if stitched_filters.shape[2] == 1: stitched_filters = np.repeat(stitched_filters, repeats=3, axis=2) imsave('outputs/stitched_filters_%dx%d.png' % (n, n), stitched_filters) def nice_imshow(ax, data, vmin=None, vmax=None, cmap=None): """Wrapper around pl.imshow""" if cmap is None: cmap = cm.jet if vmin is None: vmin = data.min() if vmax is None: vmax = data.max() import pylab as pl divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) im = ax.imshow(data, vmin=vmin, vmax=vmax, interpolation='nearest', cmap=cmap) pl.colorbar(im, cax=cax) def make_mosaic(imgs, nrows, ncols, border=1): """ Given a set of images with all the same shape, makes a mosaic with nrows and ncols """ nimgs = imgs.shape[0] imshape = imgs.shape[1:] mosaic = ma.masked_all((nrows * imshape[0] + (nrows - 1) * border, ncols * imshape[1] + (ncols - 1) * border), dtype=np.float32) paddedh = imshape[0] + border paddedw = imshape[1] + border for i in range(nimgs): row = int(np.floor(i / ncols)) col = i % ncols mosaic[row * paddedh:row * paddedh + imshape[0], col * paddedw:col * paddedw + imshape[1]] = imgs[i] return mosaic def visualize_layer_by_plotting_weights(model, layer_name): # Works only for 3D layers (such as conv layer on a 1D input) # get the symbolic outputs of each "key" layer (we gave them unique names). layer_dict = dict([(layer.name, layer) for layer in model.layers]) import pylab as pl # Visualize weights layer = layer_dict[layer_name] w = layer.W.get_value() w = np.squeeze(w) print("W shape : ", w.shape) pl.figure(figsize=(15, 15)) pl.title('conv1 weights') nice_imshow(pl.gca(), make_mosaic(w, 6, 6), cmap='gray') def visualize_layer_by_input_images(model, layer_name, input_images_t, number_of_filter_to_display_in_each_axis): # get the symbolic outputs of each "key" layer (we gave them unique names). 
layer_dict = dict([(layer.name, layer) for layer in model.layers]) from keras import backend as K # Input size for the model img_ndim = input_images_t[0].shape[0] img_height = input_images_t[0].shape[1] img_width = input_images_t[0].shape[2] # Input image with which to predict input_img = model.layers[0].input # Layer to visualize layer_output = layer_dict[layer_name].output nb_filters = layer_output._keras_shape[1] filter_width = layer_output._keras_shape[2] filter_height = layer_output._keras_shape[3] def normalize(x): # utility function to normalize a tensor by its L2 norm return x / (K.sqrt(K.mean(K.square(x))) + 1e-5) kept_filters = [] for filter_index in range(0, nb_filters): # for filter_index in range(0, 10): print('Processing filter %d' % filter_index) start_time = time.time() # we build a loss function that maximizes the activation # of the nth filter of the layer considered loss = K.mean(K.square(layer_output[:, filter_index, :, :])) # this function returns the loss and grads given the input picture iterate = K.function([input_img, K.learning_phase()], [loss, layer_output]) loss_value, layer_out = iterate([input_images_t, 0]) # decode the resulting input image if loss_value > 0: img = tensor_3d_to_image(layer_out[:, filter_index, :, :]) kept_filters.append((img, loss_value)) end_time = time.time() print('Filter %d processed in %ds' % (filter_index, end_time - start_time)) # we will stitch the best 64 filters on a 8 x 8 grid. n = number_of_filter_to_display_in_each_axis # the filters that have the highest loss are assumed to be better-looking. # we will only keep the top 64 filters. print(len(kept_filters)) kept_filters.sort(key=lambda x: x[1], reverse=True) kept_filters = kept_filters[:n * n] # build a black picture with enough space for our filters with a 5px margin in between margin = 5 width = n * filter_width + (n - 1) * margin height = n * filter_height + (n - 1) * margin stitched_filters = np.zeros((width, height, img_ndim)) # fill the picture with our saved filters for i in range(n): for j in range(n): img, loss = kept_filters[i * n + j] stitched_filters[(filter_width + margin) * i: (filter_width + margin) * i + filter_width, (filter_height + margin) * j: (filter_height + margin) * j + filter_height, :] = img # save the result to disk if not os.path.isdir('outputs/'): os.mkdir('outputs/') if stitched_filters.shape[2] == 1: stitched_filters = np.repeat(stitched_filters, repeats=3, axis=2) imsave('outputs/stitched_filters_inputImage%dx%d.png' % (n, n), stitched_filters)
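
# Illustrative, standalone sketch of the linear world-to-pixel mapping used by
# OrthoData.convert_world_point_to_map above, written so it can be checked
# without a map_ortho_data.txt file. The function name and the bounds in the
# example call are made up for this sketch.
def world_to_map_example(p, bounds, map_shape):
    """Maps a world point (x, y) into pixel coords of an image with shape (rows, cols, ...)."""
    left, right, bottom, top = bounds
    map_x = (p[0] - left) / (right - left) * map_shape[1]
    map_y = (p[1] - bottom) / (top - bottom) * map_shape[0]
    return int(round(map_x)), int(round(map_y))

# Example: world_to_map_example((0.0, 0.0), (-100.0, 100.0, -50.0, 50.0), (512, 1024))
# returns (512, 256), i.e. the world origin lands at the centre of a 1024x512 map image.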
import numpy as np from matplotlib import pyplot as plt def CalculateBranchingEvidence(d, Bsearch=None): """ :param d: output dictionary from FitModel :param Bsearch: candidate list of branching points :return: posterior probability of branching at each point and log Bayes factor of branching vs not branching """ if Bsearch is None: Bsearch = list(np.linspace(0.05, 0.95, 5)) + [1.1] # Calculate probability of branching at each point o = d["loglik"][:-1] pn = np.exp(o - np.max(o)) p = pn / pn.sum() # normalize # Calculate log likelihood ratio by averaging out o = d["loglik"] Nb = o.size - 1 if Nb != len(Bsearch) - 1: raise NameError( "Passed in wrong length of Bsearch is %g- should be %g" % (len(Bsearch), Nb) ) obj = o[:-1] illmax = np.argmax(obj) llmax = obj[illmax] lratiostable = ( llmax + np.log(1 + np.exp(obj[np.arange(obj.size) != illmax] - llmax).sum()) - o[-1] - np.log(Nb) ) return {"posteriorBranching": p, "logBayesFactor": lratiostable} def PlotBGPFit( GPy, GPt, Bsearch, d, figsize=(5, 5), height_ratios=[5, 1], colorarray=["darkolivegreen", "peru", "mediumvioletred"], ): """ Plot BGP model :param GPt: pseudotime :param GPy: gene expression. Should be 0 mean for best performance. :param Bsearch: list of candidate branching points :param d: output dictionary from FitModel :param figsize: figure size :param height_ratios: ratio of assignment plot vs posterior branching time plot :param colorarray: colors for each branch :return: dictionary of log likelihood, GPflow model, Phi matrix, predictive set of points, mean and variance, hyperparameter values, posterior on branching time """ fig, axa = plt.subplots( 2, 1, figsize=figsize, sharex=True, gridspec_kw={"height_ratios": height_ratios} ) ax = axa[0] y, pt, mul, ttestl = GPy, GPt, d["prediction"]["mu"], d["prediction"]["xtest"] lw = 4 for f in range(3): mu = mul[f] ttest = ttestl[f] col = colorarray[f] # mean.get_color() ax.plot(ttest, mu, linewidth=lw, color=col, alpha=0.7) gp_num = 1 # can be 0,1,2 - Plot against this PhiColor = ax.scatter( pt, y, c=d["Phi"][:, gp_num], vmin=0.0, vmax=1, s=40, alpha=0.7 ) _ = fig.colorbar(PhiColor, ax=ax, orientation="horizontal") ax = axa[1] p = CalculateBranchingEvidence(d, Bsearch)["posteriorBranching"] ax.stem(Bsearch[:-1], p) return fig, axa def plotBranchModel( B, pt, Y, ttestl, mul, varl, Phi, figsizeIn=(5, 5), lw=3.0, fs=10, labels=None, fPlotPhi=True, fPlotVar=False, ax=None, fColorBar=True, colorarray=["darkolivegreen", "peru", "mediumvioletred"], ): """ Plotting code that does not require access to the model but takes as input predictions. 
""" if ax is None: fig = plt.figure(figsize=figsizeIn) ax = fig.gca() else: fig = plt.gcf() d = 0 # constraint code to be 1D for now for f in range(3): mu = mul[f].numpy() var = varl[f].numpy() ttest = ttestl[f] col = colorarray[f] # mean.get_color() (mean,) = ax.plot(ttest, mu[:, d], linewidth=lw, color=col) if fPlotVar: ax.plot( ttest.flatten(), mu[:, d] + 2 * np.sqrt(var.flatten()), "--", color=col, linewidth=lw, ) ax.plot( ttest, mu[:, d] - 2 * np.sqrt(var.flatten()), "--", color=col, linewidth=lw, ) v = ax.axis() ax.plot([B, B], v[-2:], "-m", linewidth=lw) # Plot Phi or labels if fPlotPhi: gp_num = 1 # can be 0,1,2 - Plot against this PhiColor = ax.scatter(pt, Y[:, d], c=Phi[:, gp_num], vmin=0.0, vmax=1, s=40) if fColorBar: fig.colorbar(PhiColor, label="GP {} assignment probability".format(gp_num)) else: return fig, PhiColor return fig def predictBranchingModel(m, full_cov=False): """ return prediction of branching model """ pt = m.t B = m.kernel.kernels[0].Bv.flatten() l = np.min(pt) u = np.max(pt) mul = list() varl = list() ttestl = list() for f in range(1, 4): if f == 1: ttest = np.linspace(l, B, 100) # [:, None] # root else: ttest = np.linspace(B, u, 100) # [:, None] Xtest = np.hstack((ttest, ttest * 0 + f)) mu, var = m.predict_f(Xtest, full_cov=full_cov) assert np.all(np.isfinite(mu)), "All elements should be finite but are " + str( mu ) assert np.all(np.isfinite(var)), "All elements should be finite but are " + str( var ) mul.append(mu) varl.append(var) ttestl.append(ttest) return ttestl, mul, varl def GetFunctionIndexListGeneral(Xin): """ Function to return index list and input array X repeated as many time as each possible function """ # limited to one dimensional X for now! assert Xin.shape[0] == np.size(Xin) indicesBranch = [] XSample = np.zeros((Xin.shape[0], 2), dtype=float) Xnew = [] inew = 0 functionList = list( range(1, 4) ) # can be assigned to any of root or subbranches, one based counting for ix, x in enumerate(Xin): XSample[ix, 0] = Xin[ix] XSample[ix, 1] = np.random.choice(functionList) # print str(ix) + ' ' + str(x) + ' f=' + str(functionList) + ' ' + str(XSample[ix,1]) idx = [] for f in functionList: Xnew.append([x, f]) # 1 based function list - does kernel care? idx.append(inew) inew = inew + 1 indicesBranch.append(idx) Xnewa = np.array(Xnew) return (Xnewa, indicesBranch, XSample) def SetXExpandedBranchingPoint(XExpanded, B): """ Return XExpanded by removing unavailable branches """ # before branching pt, only function 1 X1 = XExpanded[ np.logical_and(XExpanded[:, 0] <= B, XExpanded[:, 1] == 1).flatten(), : ] # after branching pt, only functions 2 and 2 X23 = XExpanded[ np.logical_and(XExpanded[:, 0] > B, XExpanded[:, 1] != 1).flatten(), : ] return np.vstack([X1, X23])
# This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members import os import mock from twisted.trial import unittest from twisted.internet import defer from buildslave.test.fake.runprocess import Expect from buildslave.test.util.sourcecommand import SourceCommandTestMixin from buildslave.commands import mtn class TestMonotone(SourceCommandTestMixin, unittest.TestCase): repourl='mtn://code.monotone.ca/sandbox' branch='ca.monotone.sandbox.buildbot' def setUp(self): self.setUpCommand() def tearDown(self): self.tearDownCommand() def patch_sourcedirIsUpdateable(self, result): self.cmd.sourcedirIsUpdateable = lambda : result def test_no_db(self): "Test a basic invocation with mode=copy and no existing sourcedir" self.patch_getCommand('mtn', 'path/to/mtn') self.clean_environ() self.make_command(mtn.Monotone, dict( workdir='workdir', mode='copy', revision=None, repourl=self.repourl, branch=self.branch ), # no sourcedata -> will do fresh checkout initial_sourcedata = None, ) exp_environ = dict(PWD='.', LC_MESSAGES='C') expects = [ Expect(['path/to/mtn', 'db', 'info', '--db', os.path.join(self.basedir, 'db.mtn')], self.basedir, keepStdout=True, sendRC=False, sendStderr=False, usePTY=False, environ=exp_environ) + 1, Expect(['path/to/mtn', 'db', 'init', '--db', os.path.join(self.basedir, 'db.mtn')], self.basedir, sendRC=False, usePTY=False, environ=exp_environ) + 1, Expect([ 'clobber', 'workdir' ], self.basedir) + 0, Expect([ 'clobber', 'source' ], self.basedir) + 0, Expect(['path/to/mtn', 'pull', self.repourl+"?"+self.branch, '--db', os.path.join(self.basedir, 'db.mtn'), '--ticker=none'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'checkout', self.basedir_source, '--db', os.path.join(self.basedir, 'db.mtn'), '--branch', 'ca.monotone.sandbox.buildbot'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'automate', 'select', 'w:'], self.basedir_source, keepStdout=True, sendRC=False, timeout=120, usePTY=False) + 0, Expect([ 'copy', 'source', 'workdir'], self.basedir) + 0, ] self.patch_runprocess(*expects) d = self.run_command() d.addCallback(self.check_sourcedata, self.repourl+"?"+self.branch) return d def test_db_needs_migrating(self): "Test a basic invocation with mode=copy and no existing sourcedir" self.patch_getCommand('mtn', 'path/to/mtn') self.clean_environ() self.make_command(mtn.Monotone, dict( workdir='workdir', mode='copy', revision=None, repourl=self.repourl, branch=self.branch ), # no sourcedata -> will do fresh checkout initial_sourcedata = None, ) exp_environ = dict(PWD='.', LC_MESSAGES='C') expects = [ Expect([ 'path/to/mtn', 'db', 'info', '--db', os.path.join(self.basedir, 'db.mtn') ], self.basedir, keepStdout=True, sendRC=False, sendStderr=False, usePTY=False, environ=exp_environ) + { 'stdout' : 'blah blah 
(migration needed)\n' } + 0, Expect([ 'path/to/mtn', 'db', 'migrate', '--db', os.path.join(self.basedir, 'db.mtn') ], self.basedir, sendRC=False, usePTY=False, environ=exp_environ) + 0, Expect([ 'clobber', 'workdir' ], self.basedir) + 0, Expect([ 'clobber', 'source' ], self.basedir) + 0, Expect(['path/to/mtn', 'pull', self.repourl+"?"+self.branch, '--db', os.path.join(self.basedir, 'db.mtn'), '--ticker=none'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'checkout', self.basedir_source, '--db', os.path.join(self.basedir, 'db.mtn'), '--branch', 'ca.monotone.sandbox.buildbot'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'automate', 'select', 'w:'], self.basedir_source, keepStdout=True, sendRC=False, timeout=120, usePTY=False) + 0, Expect([ 'copy', 'source', 'workdir'], self.basedir) + 0, ] self.patch_runprocess(*expects) d = self.run_command() d.addCallback(self.check_sourcedata, self.repourl+"?"+self.branch) return d def test_db_too_new(self): "Test a basic invocation with mode=copy and no existing sourcedir" self.patch_getCommand('mtn', 'path/to/mtn') self.clean_environ() self.make_command(mtn.Monotone, dict( workdir='workdir', mode='copy', revision=None, repourl=self.repourl, branch=self.branch ), # no sourcedata -> will do fresh checkout initial_sourcedata = None, ) exp_environ = dict(PWD='.', LC_MESSAGES='C') expects = [ Expect([ 'path/to/mtn', 'db', 'info', '--db', os.path.join(self.basedir, 'db.mtn') ], self.basedir, keepStdout=True, sendRC=False, sendStderr=False, usePTY=False, environ=exp_environ) + { 'stdout' : 'blah blah (too new, cannot use)\n' } + 0 ] self.patch_runprocess(*expects) d = self.run_command() return self.assertFailure(d, mtn.MonotoneError) def test_run_mode_copy_fresh_sourcedir(self): "Test a basic invocation with mode=copy and no existing sourcedir" self.patch_getCommand('mtn', 'path/to/mtn') self.clean_environ() self.make_command(mtn.Monotone, dict( workdir='workdir', mode='copy', revision=None, repourl=self.repourl, branch=self.branch ), # no sourcedata -> will do fresh checkout initial_sourcedata = None, ) exp_environ = dict(PWD='.', LC_MESSAGES='C') expects = [ Expect(['path/to/mtn', 'db', 'info', '--db', os.path.join(self.basedir, 'db.mtn')], self.basedir, keepStdout=True, sendRC=False, sendStderr=False, usePTY=False, environ=exp_environ) + 0, Expect([ 'clobber', 'workdir' ], self.basedir) + 0, Expect([ 'clobber', 'source' ], self.basedir) + 0, Expect(['path/to/mtn', 'pull', self.repourl+"?"+self.branch, '--db', os.path.join(self.basedir, 'db.mtn'), '--ticker=none'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'checkout', self.basedir_source, '--db', os.path.join(self.basedir, 'db.mtn'), '--branch', 'ca.monotone.sandbox.buildbot'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'automate', 'select', 'w:'], self.basedir_source, keepStdout=True, sendRC=False, timeout=120, usePTY=False) + 0, Expect([ 'copy', 'source', 'workdir'], self.basedir) + 0, ] self.patch_runprocess(*expects) d = self.run_command() d.addCallback(self.check_sourcedata, self.repourl+"?"+self.branch) return d def test_run_mode_copy_update_sourcedir(self): """test a copy where the sourcedata indicates that the source directory can be updated""" self.patch_getCommand('mtn', 'path/to/mtn') 
self.clean_environ() self.make_command(mtn.Monotone, dict( workdir='workdir', mode='copy', revision=None, repourl=self.repourl, branch=self.branch, progress=True, # added here for better coverage ), initial_sourcedata = self.repourl+"?"+self.branch ) self.patch_sourcedirIsUpdateable(True) exp_environ = dict(PWD='.', LC_MESSAGES='C') expects = [ Expect([ 'path/to/mtn', 'db', 'info', '--db', os.path.join(self.basedir, 'db.mtn')], self.basedir, keepStdout=True, sendRC=False, sendStderr=False, usePTY=False, environ=exp_environ) + 0, Expect([ 'clobber', 'workdir' ], self.basedir) + 0, Expect([ 'path/to/mtn', 'pull', self.repourl+"?"+self.branch, '--db', os.path.join(self.basedir, 'db.mtn'), '--ticker=dot'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect([ 'path/to/mtn', 'update', '--db', os.path.join(self.basedir, 'db.mtn'), '-r', 'h:ca.monotone.sandbox.buildbot', '-b', 'ca.monotone.sandbox.buildbot'], self.basedir_source, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'automate', 'select', 'w:'], self.basedir_source, keepStdout=True, sendRC=False, timeout=120, usePTY=False) + 0, Expect([ 'copy', 'source', 'workdir'], self.basedir) + 0, ] self.patch_runprocess(*expects) d = self.run_command() d.addCallback(self.check_sourcedata, self.repourl+"?"+self.branch) return d def test_run_mode_update_fresh(self): self.patch_getCommand('mtn', 'path/to/mtn') self.clean_environ() self.make_command(mtn.Monotone, dict( workdir='workdir', mode='update', revision=None, repourl=self.repourl, branch=self.branch, progress=True, # added here for better coverage ), initial_sourcedata = self.repourl+"?"+self.branch ) self.patch_sourcedirIsUpdateable(False) exp_environ = dict(PWD='.', LC_MESSAGES='C') expects = [ Expect([ 'path/to/mtn', 'db', 'info', '--db', os.path.join(self.basedir, 'db.mtn')], self.basedir, keepStdout=True, sendRC=False, sendStderr=False, usePTY=False, environ=exp_environ) + 0, Expect([ 'clobber', 'workdir' ], self.basedir) + 0, Expect([ 'path/to/mtn', 'pull', self.repourl+"?"+self.branch, '--db', os.path.join(self.basedir, 'db.mtn'), '--ticker=dot'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'checkout', self.basedir_workdir, '--db', os.path.join(self.basedir, 'db.mtn'), '--branch', 'ca.monotone.sandbox.buildbot'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'automate', 'select', 'w:'], self.basedir_workdir, keepStdout=True, sendRC=False, timeout=120, usePTY=False) + 0, ] self.patch_runprocess(*expects) d = self.run_command() d.addCallback(self.check_sourcedata, self.repourl+"?"+self.branch) return d def test_run_mode_update_existing(self): self.patch_getCommand('mtn', 'path/to/mtn') self.clean_environ() self.make_command(mtn.Monotone, dict( workdir='workdir', mode='update', revision=None, repourl=self.repourl, branch=self.branch, progress=True, # added here for better coverage ), initial_sourcedata = self.repourl+"?"+self.branch ) self.patch_sourcedirIsUpdateable(True) exp_environ = dict(PWD='.', LC_MESSAGES='C') expects = [ Expect([ 'path/to/mtn', 'db', 'info', '--db', os.path.join(self.basedir, 'db.mtn')], self.basedir, keepStdout=True, sendRC=False, sendStderr=False, usePTY=False, environ=exp_environ) + 0, Expect([ 'path/to/mtn', 'pull', self.repourl+"?"+self.branch, '--db', os.path.join(self.basedir, 'db.mtn'), 
'--ticker=dot'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect([ 'path/to/mtn', 'update', '--db', os.path.join(self.basedir, 'db.mtn'), '-r', 'h:ca.monotone.sandbox.buildbot', '-b', 'ca.monotone.sandbox.buildbot'], self.basedir_workdir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'automate', 'select', 'w:'], self.basedir_workdir, keepStdout=True, sendRC=False, timeout=120, usePTY=False) + 0, ] self.patch_runprocess(*expects) d = self.run_command() d.addCallback(self.check_sourcedata, self.repourl+"?"+self.branch) return d def test_run_mode_update_existing_known_rev(self): self.patch_getCommand('mtn', 'path/to/mtn') self.clean_environ() self.make_command(mtn.Monotone, dict( workdir='workdir', mode='update', revision='abcdef01', repourl=self.repourl, branch=self.branch, progress=True, # added here for better coverage ), initial_sourcedata = self.repourl+"?"+self.branch ) self.patch_sourcedirIsUpdateable(True) exp_environ = dict(PWD='.', LC_MESSAGES='C') expects = [ Expect([ 'path/to/mtn', 'db', 'info', '--db', os.path.join(self.basedir, 'db.mtn')], self.basedir, keepStdout=True, sendRC=False, sendStderr=False, usePTY=False, environ=exp_environ) + 0, Expect([ 'path/to/mtn', 'pull', self.repourl+"?"+self.branch, '--db', os.path.join(self.basedir, 'db.mtn'), '--ticker=dot'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect([ 'path/to/mtn', 'update', '--db', os.path.join(self.basedir, 'db.mtn'), '--revision', 'abcdef01', '-b', 'ca.monotone.sandbox.buildbot'], self.basedir_workdir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'automate', 'select', 'w:'], self.basedir_workdir, keepStdout=True, sendRC=False, timeout=120, usePTY=False) + 0, ] self.patch_runprocess(*expects) d = self.run_command() d.addCallback(self.check_sourcedata, self.repourl+"?"+self.branch) return d def test_run_mode_update_existing_unknown_rev(self): self.patch_getCommand('mtn', 'path/to/mtn') self.clean_environ() self.make_command(mtn.Monotone, dict( workdir='workdir', mode='update', revision='abcdef01', repourl=self.repourl, branch=self.branch, progress=True, # added here for better coverage ), initial_sourcedata = self.repourl+"?"+self.branch ) self.patch_sourcedirIsUpdateable(True) exp_environ = dict(PWD='.', LC_MESSAGES='C') expects = [ Expect([ 'path/to/mtn', 'db', 'info', '--db', os.path.join(self.basedir, 'db.mtn')], self.basedir, keepStdout=True, sendRC=False, sendStderr=False, usePTY=False, environ=exp_environ) + 0, Expect([ 'path/to/mtn', 'pull', self.repourl+"?"+self.branch, '--db', os.path.join(self.basedir, 'db.mtn'), '--ticker=dot'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect([ 'path/to/mtn', 'update', '--db', os.path.join(self.basedir, 'db.mtn'), '--revision', 'abcdef01', '-b', 'ca.monotone.sandbox.buildbot'], self.basedir_workdir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 1, Expect([ 'clobber', 'workdir' ], self.basedir) + 0, Expect([ 'path/to/mtn', 'pull', self.repourl+"?"+self.branch, '--db', os.path.join(self.basedir, 'db.mtn'), '--ticker=dot'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'checkout', self.basedir_workdir, '--db', os.path.join(self.basedir, 'db.mtn'), '--revision', 'abcdef01', 
'--branch', 'ca.monotone.sandbox.buildbot'], self.basedir, keepStdout=True, sendRC=False, timeout=120, usePTY=False, environ=exp_environ) + 0, Expect(['path/to/mtn', 'automate', 'select', 'w:'], self.basedir_workdir, keepStdout=True, sendRC=False, timeout=120, usePTY=False) + 0, ] self.patch_runprocess(*expects) d = self.run_command() d.addCallback(self.check_sourcedata, self.repourl+"?"+self.branch) return d # Testing parseGotRevision def do_test_parseGotRevision(self, stdout, exp): self.patch_getCommand('mtn', 'path/to/mtn') self.make_command(mtn.Monotone, dict( workdir='workdir', repourl=self.repourl, branch=self.branch )) def _dovccmd(fn, dopull, callback=None, keepStdout=False): #self.assertTrue(keepStdout) self.cmd.command = mock.Mock() self.cmd.command.stdout = stdout d = defer.succeed(None) d.addCallback(callback) return d self.cmd._dovccmd = _dovccmd self.cmd.srcdir = self.cmd.workdir d = self.cmd.parseGotRevision() def check(res): self.assertEqual(res, exp) d.addCallback(check) return d def test_parseGotRevision_bogus(self): return self.do_test_parseGotRevision("mtn: misuse: no match for selection '1234'\n", None) def test_parseGotRevision_wrong_length(self): return self.do_test_parseGotRevision("\n1234abcd\n", None) def test_parseGotRevision_ok(self): return self.do_test_parseGotRevision( "\n4026d33b0532b11f36b0875f63699adfa8ee8662\n", "4026d33b0532b11f36b0875f63699adfa8ee8662")
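
# Illustrative, standalone restatement of what the three parseGotRevision tests
# above check: accept output that is exactly one 40-character hex monotone
# revision id, reject error text and short ids. The helper below is
# hypothetical; the real parsing lives in buildslave.commands.mtn.
import re

def _looks_like_mtn_revision(stdout):
    "Return the 40-hex-digit revision found in mtn output, or None."
    candidate = stdout.strip()
    if re.match(r'^[0-9a-f]{40}$', candidate):
        return candidate
    return None

# _looks_like_mtn_revision("\n4026d33b0532b11f36b0875f63699adfa8ee8662\n")
#     returns "4026d33b0532b11f36b0875f63699adfa8ee8662"
# _looks_like_mtn_revision("mtn: misuse: no match for selection '1234'\n") returns None
# _looks_like_mtn_revision("\n1234abcd\n") returns None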
''' Created on Jun 17, 2014 @author: sstober ''' import logging; log = logging.getLogger(__name__); from pylearn2.train_extensions import TrainExtension; import numpy as np; import theano; from sklearn.metrics import confusion_matrix,precision_recall_fscore_support; from pylearn2.space import CompositeSpace from pylearn2.space import NullSpace from deepthought.pylearn2ext.util import aggregate_classification, process_dataset; class ClassificationLoggingCallback(TrainExtension): def __init__(self, dataset, model, header=None, class_prf1_channels=True, confusion_channels=True, # seq_prf1_channel=True, seq_confusion_channel=True ): self.dataset = dataset; self.header = header; self.class_prf1_channels = class_prf1_channels; self.confusion_channels = confusion_channels; minibatch = model.get_input_space().make_theano_batch(); self.output_fn = theano.function(inputs=[minibatch], outputs=model.fprop(minibatch)); self.data_specs = (CompositeSpace(( model.get_input_space(), model.get_output_space())), ("features", "targets")); if self.header is not None: self.channel_prefix = self.header; else: if hasattr(self.dataset, 'name'): #s elf.dataset.name is not None: self.channel_prefix = self.dataset.name; else: self.channel_prefix = ''; def setup(self, model, dataset, algorithm): # print 'setup for dataset: {}\t {} '.format(dataset.name, dataset); # print 'self.dataset: {}\t {} '.format(self.dataset.name, self.dataset); if hasattr(self.dataset, 'get_class_labels'): class_labels = self.dataset.get_class_labels(); else: class_labels = ['0', '1']; # helper function def add_channel(name, val): model.monitor.add_channel( name=self.channel_prefix+name, ipt=None, # no input data_specs = (NullSpace(), ''), # -> no input specs val=val, dataset=self.dataset, ); if self.class_prf1_channels: for class_label in class_labels: add_channel('_precision_'+str(class_label), 0.); add_channel('_recall_'+str(class_label), 0.); add_channel('_f1_'+str(class_label), 0.); add_channel('_f1_mean', 0.); # add channels for confusion matrix if self.confusion_channels: for c1 in class_labels: for c2 in class_labels: add_channel('_confusion_'+c1+'_as_'+c2, 0.); add_channel('_seq_misclass_rate', 0.); add_channel('_wseq_misclass_rate', 0.); add_channel('_pseq_misclass_rate', 0.); add_channel('_trial_misclass_rate', 0.); add_channel('_wtrial_misclass_rate', 0.); add_channel('_ptrial_misclass_rate', 0.); add_channel('_trial_mean_f1', 0.); add_channel('_wtrial_mean_f1', 0.); add_channel('_ptrial_mean_f1', 0.); add_channel('_seq_mean_f1', 0.); add_channel('_wseq_mean_f1', 0.); add_channel('_pseq_mean_f1', 0.); def on_monitor(self, model, dataset, algorithm): # print 'self.dataset: {}\t {} '.format(self.dataset.name, self.dataset); # print self.dataset.X[0,0:5]; y_real, y_pred, output = process_dataset(model, self.dataset, data_specs=self.data_specs, output_fn=self.output_fn) if self.header is not None: print self.header; # Compute confusion matrix # print classification_report(y_real, y_pred); conf_matrix = confusion_matrix(y_real, y_pred); # if self.dataset.name == 'test': # print conf_matrix; # log values in monitoring channels channels = model.monitor.channels; if hasattr(self.dataset, 'get_class_labels'): class_labels = self.dataset.get_class_labels(); else: class_labels = ['0', '1']; # FIXME: more flexible fallback required # p, r, f1, s = precision_recall_fscore_support(y_real, y_pred, average=None); p, r, f1 = precision_recall_fscore_support(y_real, y_pred, average=None)[0:3]; mean_f1 = np.mean(f1); misclass = (y_real != 
y_pred).mean(); report = [['frames', mean_f1, misclass]]; channels[self.channel_prefix+'_f1_mean'].val_record[-1] = mean_f1; if self.class_prf1_channels: for i, class_label in enumerate(class_labels): channels[self.channel_prefix+'_precision_'+str(class_label)].val_record[-1] = p[i]; channels[self.channel_prefix+'_recall_'+str(class_label)].val_record[-1] = r[i]; channels[self.channel_prefix+'_f1_'+str(class_label)].val_record[-1] = f1[i]; if self.confusion_channels: # add channels for confusion matrix for i, c1 in enumerate(class_labels): for j, c2 in enumerate(class_labels): channels[self.channel_prefix+'_confusion_'+c1+'_as_'+c2].val_record[-1] = conf_matrix[i][j]; if self.dataset.name == 'test': print confusion_matrix(y_real, y_pred); if hasattr(self.dataset, 'sequence_partitions'): # print 'sequence-aggregated performance'; s_real, s_pred, s_predf, s_predp = aggregate_classification( self.dataset.sequence_partitions, y_real, y_pred, output); # NOTE: uses weighted version for printout # both, weighted and un-weighted are logged in the monitor for plotting # p, r, f1, s = precision_recall_fscore_support(s_real, s_pred, average=None); p, r, f1 = precision_recall_fscore_support(s_real, s_pred, average=None)[0:3]; s_mean_f1 = np.mean(f1); # p, r, f1, s = precision_recall_fscore_support(s_real, s_predf, average=None); p, r, f1 = precision_recall_fscore_support(s_real, s_predf, average=None)[0:3]; ws_mean_f1 = np.mean(f1); # p, r, f1, s = precision_recall_fscore_support(s_real, s_predp, average=None); p, r, f1 = precision_recall_fscore_support(s_real, s_predp, average=None)[0:3]; ps_mean_f1 = np.mean(f1); # print classification_report(s_real, s_predf); # print confusion_matrix(s_real, s_predf); s_misclass = (s_real != s_pred).mean(); ws_misclass = (s_real != s_predf).mean(); ps_misclass = (s_real != s_predp).mean(); report.append(['sequences', s_mean_f1, s_misclass]); report.append(['w. sequences', ws_mean_f1, ws_misclass]); report.append(['p. 
sequences', ps_mean_f1, ps_misclass]); # print 'seq misclass {:.4f}'.format(s_misclass); # print 'weighted seq misclass {:.4f}'.format(ws_misclass); channels[self.channel_prefix+'_seq_misclass_rate'].val_record[-1] = s_misclass; channels[self.channel_prefix+'_wseq_misclass_rate'].val_record[-1] = ws_misclass; channels[self.channel_prefix+'_pseq_misclass_rate'].val_record[-1] = ps_misclass; channels[self.channel_prefix+'_seq_mean_f1'].val_record[-1] = s_mean_f1; channels[self.channel_prefix+'_wseq_mean_f1'].val_record[-1] = ws_mean_f1; channels[self.channel_prefix+'_pseq_mean_f1'].val_record[-1] = ps_mean_f1; if hasattr(self.dataset, 'trial_partitions'): # print 'trial-aggregated performance'; t_real, t_pred, t_predf, t_predp = aggregate_classification( self.dataset.trial_partitions, y_real, y_pred, output); # NOTE: uses un-weighted version # both, weighted and un-weighted are logged in the monitor for plotting # p, r, f1, s = precision_recall_fscore_support(t_real, t_pred, average=None); p, r, f1 = precision_recall_fscore_support(t_real, t_pred, average=None)[0:3]; t_mean_f1 = np.mean(f1); # p, r, f1, s = precision_recall_fscore_support(t_real, t_predf, average=None); p, r, f1 = precision_recall_fscore_support(t_real, t_predf, average=None)[0:3]; wt_mean_f1 = np.mean(f1); # p, r, f1, s = precision_recall_fscore_support(t_real, t_predp, average=None); p, r, f1 = precision_recall_fscore_support(t_real, t_predp, average=None)[0:3]; pt_mean_f1 = np.mean(f1); # print classification_report(t_real, t_pred); # if self.dataset.name == 'test': # print confusion_matrix(t_real, t_predp); t_misclass = (t_real != t_pred).mean(); wt_misclass = (t_real != t_predf).mean(); pt_misclass = (t_real != t_predp).mean(); report.append(['trials', t_mean_f1, t_misclass]); report.append(['w. trials', wt_mean_f1, wt_misclass]); report.append(['p. trials', pt_mean_f1, pt_misclass]); # print 'trial misclass {:.4f}'.format(t_misclass); # print 'weighted trial misclass {:.4f}'.format(wt_misclass); channels[self.channel_prefix+'_trial_misclass_rate'].val_record[-1] = t_misclass; channels[self.channel_prefix+'_wtrial_misclass_rate'].val_record[-1] = wt_misclass; channels[self.channel_prefix+'_ptrial_misclass_rate'].val_record[-1] = pt_misclass; channels[self.channel_prefix+'_trial_mean_f1'].val_record[-1] = t_mean_f1; channels[self.channel_prefix+'_wtrial_mean_f1'].val_record[-1] = wt_mean_f1; channels[self.channel_prefix+'_ptrial_mean_f1'].val_record[-1] = pt_mean_f1; for label, f1, misclass in report: print '{:>15}: f1 = {:.3f} mc = {:.3f}'.format(label, f1, misclass);
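
# Illustrative, standalone sketch of the frame-level numbers that on_monitor()
# above writes into the monitor channels: per-class precision/recall/F1 from
# sklearn plus a plain misclassification rate. The function name and the label
# vectors in the example are made up; it reuses the module-level imports above.
def _example_frame_metrics(y_real, y_pred):
    p, r, f1 = precision_recall_fscore_support(y_real, y_pred, average=None)[0:3]
    return {'mean_f1': float(np.mean(f1)),
            'misclass': float((np.asarray(y_real) != np.asarray(y_pred)).mean())}

# Example: _example_frame_metrics([0, 0, 1, 1], [0, 1, 1, 1])
# returns {'mean_f1': 0.733..., 'misclass': 0.25}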
# jsb/socklib/xmpp/bot.py # # """ jabber bot definition """ ## jsb imports from jsb.lib.users import users from jsb.utils.exception import handle_exception from jsb.utils.trace import whichmodule from jsb.utils.locking import lockdec from jsb.utils.pdod import Pdod from jsb.utils.dol import Dol from jsb.lib.less import Less from jsb.lib.callbacks import callbacks, remote_callbacks from jsb.lib.threads import start_new_thread from jsb.lib.botbase import BotBase from jsb.lib.exit import globalshutdown from jsb.lib.channelbase import ChannelBase from jsb.lib.fleet import getfleet ## jsb.socket imports from jsb.lib.socklib.utils.generic import waitforqueue, jabberstrip, getrandomnick from jsb.utils.generic import toenc, fromenc ## xmpp imports from jsb.contrib.xmlstream import XMLescape, XMLunescape from presence import Presence from message import Message from iq import Iq from core import XMLStream from wait import XMPPWait, XMPPErrorWait from jid import JID, InvalidJID from errors import xmpperrors ## basic imports import time import Queue import os import threading import thread import types import xml import re import hashlib import logging import cgi ## locks outlock = thread.allocate_lock() inlock = thread.allocate_lock() connectlock = thread.allocate_lock() outlocked = lockdec(outlock) inlocked = lockdec(inlock) connectlocked = lockdec(connectlock) ## SXMPPBot class class SXMPPBot(XMLStream, BotBase): """ xmpp bot class. """ def __init__(self, cfg=None, usersin=None, plugs=None, jid=None, *args, **kwargs): BotBase.__init__(self, cfg, usersin, plugs, jid, *args, **kwargs) self.port = 5222 if not self.host: self.host = self.cfg.host if not self.host: raise Exception("%s - host not set - %s" % (self.name, str(self.cfg))) self.username = self.user.split('@')[0] XMLStream.__init__(self, self.host, self.port, self.name) self.type = 'sxmpp' self.sock = None self.me = self.cfg.user self.jid = self.me self.lastin = None self.test = 0 self.password = "" self.connecttime = 0 self.connection = None self.privwait = XMPPWait() self.errorwait = XMPPErrorWait() self.jabber = True self.jids = {} self.topics = {} self.timejoined = {} self.channels409 = [] if self.state and not self.state.data.ratelimit: self.state.data.ratelimit = 0.05 if self.port == 0: self.port = 5222 def _resumedata(self): """ return data needed for resuming. """ return {self.name: { 'type': self.type, 'nick': self.nick, 'server': self.server, 'port': self.port, 'password': self.password, 'ipv6': self.ipv6, 'user': self.user }} def _keepalive(self): """ keepalive method .. send empty string to self every 3 minutes. """ nrsec = 0 self.sendpresence() while not self.stopped: time.sleep(1) nrsec += 1 if nrsec < 180: continue else: nrsec = 0 self.sendpresence() def sendpresence(self): """ send presence based on status and status text set by user. """ if self.state: if self.state.has_key('status') and self.state['status']: status = self.state['status'] else: status = "" if self.state.has_key('show') and self.state['show']: show = self.state['show'] else: show = "" else: status = "" show = "" logging.debug('%s - keepalive - %s - %s' % (self.name, show, status)) if show and status: p = Presence({'to': self.me, 'show': show, 'status': status}) elif show: p = Presence({'to': self.me, 'show': show }) elif status: p = Presence({'to': self.me, 'status': status}) else: p = Presence({'to': self.me }) self.send(p) def _keepchannelsalive(self): """ channels keep alive method. 
""" nrsec = 0 p = Presence({'to': self.me, 'txt': '' }) while not self.stopped: time.sleep(1) nrsec += 1 if nrsec < 600: continue else: nrsec = 0 for chan in self.state['joinedchannels']: if chan not in self.channels409: p = Presence({'to': chan}) self.send(p) def connect(self, reconnect=True): """ connect the xmpp server. """ try: if not XMLStream.connect(self): logging.error('%s - connect to %s:%s failed' % (self.name, self.host, self.port)) return else: logging.warn('%s - connected' % self.name) self.logon(self.cfg.user, self.cfg.password) start_new_thread(self._keepalive, ()) self.requestroster() self._raw("<presence/>") self.connectok.set() self.sock.settimeout(None) return True except Exception, ex: handle_exception() if reconnect: return self.reconnect() def logon(self, user, password): """ logon on the xmpp server. """ iq = self.initstream() if not iq: logging.error("sxmpp - cannot init stream") ; return if not self.auth(user, password, iq.id): logging.warn("%s - sleeping 20 seconds before register" % self.name) time.sleep(20) if self.register(user, password): time.sleep(5) self.auth(user, password) else: time.sleep(10) self.exit() return XMLStream.logon(self) def initstream(self): """ send initial string sequence to the xmpp server. """ logging.debug('%s - starting initial stream sequence' % self.name) self._raw("""<stream:stream to='%s' xmlns='jabber:client' xmlns:stream='http://etherx.jabber.org/streams'>""" % (self.user.split('@')[1], )) result = self.connection.read() iq = self.loop_one(result) logging.debug("%s - initstream - %s" % (self.name, result)) return iq def register(self, jid, password): """ register the jid to the server. """ try: resource = jid.split("/")[1] except IndexError: resource = "jsb" logging.warn('%s - registering %s' % (self.name, jid)) self._raw("""<iq type='get'><query xmlns='jabber:iq:register'/></iq>""") result = self.connection.read() iq = self.loop_one(result) if not iq: logging.error("%s - unable to register" % self.name) return logging.debug('%s - register: %s' % (self.name, str(iq))) self._raw("""<iq type='set'><query xmlns='jabber:iq:register'><username>%s</username><resource>%s</resource><password>%s</password></query></iq>""" % (jid.split('@')[0], resource, password)) result = self.connection.read() logging.debug('%s - register - %s' % (self.name, result)) if not result: return False iq = self.loop_one(result) if not iq: logging.error("%s - can't decode data - %s" % (self.name, result)) return False logging.debug('sxmpp - register - %s' % result) if iq.error: logging.warn('%s - register FAILED - %s' % (self.name, iq.error)) if not iq.error.code: logging.error("%s - can't determine error code" % self.name) ; return False if iq.error.code == "405": logging.error("%s - this server doesn't allow registration by the bot, you need to create an account for it yourself" % self.name) elif iq.error.code == "500": logging.error("%s - %s - %s" % (self.name, iq.error.code, iq.error.text)) else: logging.error("%s - %s" % (self.name, xmpperrors[iq.error.code])) self.error = iq.error return False logging.warn('%s - register ok' % self.name) return True def auth(self, jid, password, digest=""): """ auth against the xmpp server. 
""" logging.warn('%s - authing %s' % (self.name, jid)) name = jid.split('@')[0] rsrc = self.cfg['resource'] or self.cfg['resource'] or 'jsb'; self._raw("""<iq type='get'><query xmlns='jabber:iq:auth'><username>%s</username></query></iq>""" % name) result = self.connection.read() iq = self.loop_one(result) logging.debug('%s - auth - %s' % (self.name, result)) if ('digest' in result) and digest: s = hashlib.new('SHA1') s.update(digest) s.update(password) d = s.hexdigest() self._raw("""<iq type='set'><query xmlns='jabber:iq:auth'><username>%s</username><digest>%s</digest><resource>%s</resource></query></iq>""" % (name, d, rsrc)) else: self._raw("""<iq type='set'><query xmlns='jabber:iq:auth'><username>%s</username><resource>%s</resource><password>%s</password></query></iq>""" % (name, rsrc, password)) result = self.connection.read() iq = self.loop_one(result) if not iq: logging.error('%s - auth failed - %s' % (self.name, result)) return False logging.debug('%s - auth - %s' % (self.name, result)) if iq.error: logging.warn('%s - auth failed - %s' % (self.name, iq.error.code)) if iq.error.code == "401": logging.warn("%s - wrong user or password" % self.name) else: logging.warn("%s - %s" % (self.name, result)) self.error = iq.error return False logging.warn('%s - auth ok' % self.name) return True def requestroster(self): """ request roster from xmpp server. """ self._raw("<iq type='get'><query xmlns='jabber:iq:roster'/></iq>") def disconnectHandler(self, ex): """ disconnect handler. """ self.reconnect() def outnocb(self, printto, txt, how=None, event=None, html=False, *args, **kwargs): """ output txt to bot. """ if printto and printto in self.state['joinedchannels']: outtype = 'groupchat' else: outtype = "chat" target = printto if not html: txt = self.normalize(txt) repl = Message({'from': self.me, 'to': target, 'type': outtype, 'txt': txt}) if html: repl.html = txt if not repl.type: repl.type = 'normal' logging.debug("%s - sxmpp - out - %s - %s" % (self.name, printto, unicode(txt))) self.send(repl) def broadcast(self, txt): """ broadcast txt to all joined channels. """ for i in self.state['joinedchannels']: self.say(i, txt) def handle_iq(self, data): """ iq handler .. overload this when needed. """ pass def handle_message(self, data): """ message handler. """ m = Message(data) m.parse(self) if data.type == 'groupchat' and data.subject: logging.debug("%s - checking topic" % self.name) self.topiccheck(m) nm = Message(m) callbacks.check(self, nm) return if data.get('x').xmlns == 'jabber:x:delay': logging.warn("%s - ignoring delayed message" % self.name) return self.privwait.check(m) if m.isresponse: logging.debug("%s - message is a response" % self.name) return jid = None m.origjid = m.jid for node in m.subelements: try: m.jid = node.x.item.jid except (AttributeError, TypeError): continue if self.me in m.fromm: logging.debug("%s - message to self .. ignoring" % self.name) return 0 try: if m.type == 'error': if m.code: logging.error('%s - error - %s' % (self.name, str(m))) self.errorwait.check(m) self.errorHandler(m) except Exception, ex: handle_exception() self.put(m) def errorHandler(self, event): """ error handler .. calls the errorhandler set in the event. """ try: logging.error("%s - error occured in %s - %s" % (self.name, event.txt, event.userhost)) event.errorHandler() except AttributeError: logging.error('%s - unhandled error - %s' % (self.name, event.dump())) def handle_presence(self, data): """ presence handler. 
""" p = Presence(data) p.parse() frm = p.fromm nickk = "" nick = p.nick if self.me in p.userhost: return 0 if nick: self.userhosts[nick] = str(frm) nickk = nick jid = None for node in p.subelements: try: jid = node.x.item.jid except (AttributeError, TypeError): continue if nickk and jid: channel = p.channel if not self.jids.has_key(channel): self.jids[channel] = {} self.jids[channel][nickk] = jid self.userhosts[nickk] = str(jid) logging.debug('%s - setting jid of %s (%s) to %s' % (self.name, nickk, channel, jid)) if p.type == 'subscribe': pres = Presence({'to': p.fromm, 'type': 'subscribed'}) self.send(pres) pres = Presence({'to': p.fromm, 'type': 'subscribe'}) self.send(pres) nick = p.resource if p.type != 'unavailable': p.joined = True p.type = 'available' elif self.me in p.userhost: try: del self.jids[p.channel] logging.debug('%s - removed %s channel jids' % (self.name, p.channel)) except KeyError: pass else: try: del self.jids[p.channel][p.nick] logging.debug('%s - removed %s jid' % (self.name, p.nick)) except KeyError: pass if p.type == 'error': for node in p.subelements: try: err = node.error.code except (AttributeError, TypeError): err = 'no error set' try: txt = node.text.data except (AttributeError, TypeError): txt = "" if err: logging.error('%s - error - %s - %s' % (self.name, err, txt)) self.errorwait.check(p) try: method = getattr(self,'handle_' + err) try: method(p) except: handle_exception() except AttributeError: pass self.doevent(p) def invite(self, jid): pres = Presence({'to': jid, 'type': 'subscribe'}) self.send(pres) time.sleep(2) pres = Presence({'to': jid}) self.send(pres) def send(self, what): """ send stanza to the server. """ if not what: logging.debug("%s - can't send empty message" % self.name) return try: to = what['to'] except (KeyError, TypeError): logging.error("%s - can't determine where to send %s to" % (self.name, what)) return try: jid = JID(to) except (InvalidJID, AttributeError): logging.error("%s - invalid jid - %s - %s" % (self.name, str(to), str(what))) return try: del what['from'] except KeyError: pass try: xml = what.tojabber() if not xml: raise Exception("can't convert %s to xml .. bot.send()" % what) except (AttributeError, TypeError): handle_exception() return if not self.checkifvalid(xml): logging.error("%s - NOT PROPER XML - %s" % (self.name, xml)) else: self._raw(xml) def action(self, printto, txt, fromm=None, groupchat=True): """ send an action. """ txt = "/me " + txt if self.google: fromm = self.me if printto in self.state['joinedchannels'] and groupchat: message = Message({'to': printto, 'txt': txt, 'type': 'groupchat'}) else: message = Message({'to': printto, 'txt': txt}) if fromm: message.fromm = fromm self.send(message) def userwait(self, msg, txt): """ wait for user response. """ msg.reply(txt) queue = Queue.Queue() self.privwait.register(msg, queue) result = queue.get() if result: return result.txt def save(self): """ save bot's state. """ if self.state: self.state.save() def quit(self): """ send unavailable presence. """ if self.error: return presence = Presence({'type': 'unavailable' ,'to': self.jid}) if self.state: for i in self.state.data.joinedchannels: presence.to = i self.send(presence) presence = Presence({'type': 'unavailable', 'to': self.jid}) presence['from'] = self.me self.send(presence) def setstatus(self, status, show=""): """ send status presence. 
""" if self.error: return if self.state: self.state['status'] = status self.state['show'] = show self.state.save() presence = Presence({'status': status, 'show': show ,'to': self.jid}) self.send(presence) def shutdown(self): self.outqueue.put_nowait(None) def join(self, channel, password=None, nick=None): """ join conference. """ if channel.startswith("#"): return try: if not nick: nick = channel.split('/')[1] except IndexError: nick = self.nick channel = channel.split('/')[0] q = Queue.Queue() self.errorwait.register("409", q, 3) self.errorwait.register("401", q, 3) self.errorwait.register("400", q, 3) presence = Presence({'to': channel + '/' + nick}) if password: presence.x.password = password self.send(presence) errorobj = waitforqueue(q, 3000) if errorobj: err = errorobj[0].error logging.error('%s - error joining %s - %s' % (self.name, channel, err)) if err >= '400': if channel not in self.channels409: self.channels409.append(channel) return err self.timejoined[channel] = time.time() chan = ChannelBase(channel, self.botname) chan.data['nick'] = nick if password: chan.data['key'] = password if not chan.data.has_key('cc'): chan.data['cc'] = self.cfg['defaultcc'] or '!' if channel not in self.state['joinedchannels']: self.state['joinedchannels'].append(channel) self.state.save() if channel in self.channels409: self.channels409.remove(channel) return 1 def part(self, channel): """ leave conference. """ if channel.startswith("#"): return presence = Presence({'to': channel}) presence.type = 'unavailable' self.send(presence) if channel in self.state['joinedchannels']: self.state['joinedchannels'].remove(channel) self.state.save() return 1 def outputnolog(self, printto, what, how, who=None, fromm=None): """ do output but don't log it. """ if fromm: return self.saynocb(printto, what) def topiccheck(self, msg): """ check if topic is set. """ if msg.groupchat: try: topic = msg.subject if not topic: return None self.topics[msg.channel] = (topic, msg.userhost, time.time()) logging.debug('%s - topic of %s set to %s' % (self.name, msg.channel, topic)) except AttributeError: return None def settopic(self, channel, txt): """ set topic. """ pres = Message({'to': channel, 'subject': txt}) pres.type = 'groupchat' self.send(pres) def gettopic(self, channel): """ get topic. """ try: topic = self.topics[channel] return topic except KeyError: return None def domsg(self, msg): """ dispatch an msg on the bot. """ self.doevent(msg) def normalize(self, what): #what = cgi.escape(what) what = what.replace("\002", "") what = what.replace("\003", "") what = what.replace("<b>", "") what = what.replace("</b>", "") what = what.replace("&lt;b&gt;", "") what = what.replace("&lt;/b&gt;", "") what = what.replace("<i>", "") what = what.replace("</i>", "") what = what.replace("&lt;i&gt;", "") what = what.replace("&lt;/i&gt;", "") return what def doreconnect(self): """ reconnect to the server. """ botjid = self.jid newbot = getfleet().makebot('sxmpp', self.name, cfg=self.cfg) newbot.reconnectcount = self.reconnectcount self.exit() if newbot.start(): self.jid += '.old' #newbot.joinchannels() if fleet.replace(botjid, newbot): return True return False
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from abc import ABCMeta, abstractmethod import sys if sys.version >= '3': xrange = range from pyspark import SparkContext from pyspark.sql import DataFrame from pyspark.ml import Estimator, Transformer, Model from pyspark.ml.param import Params from pyspark.ml.util import _jvm from pyspark.ml.common import inherit_doc, _java2py, _py2java class JavaWrapper(object): """ Wrapper class for a Java companion object """ def __init__(self, java_obj=None): super(JavaWrapper, self).__init__() self._java_obj = java_obj @classmethod def _create_from_java_class(cls, java_class, *args): """ Construct this object from given Java classname and arguments """ java_obj = JavaWrapper._new_java_obj(java_class, *args) return cls(java_obj) def _call_java(self, name, *args): m = getattr(self._java_obj, name) sc = SparkContext._active_spark_context java_args = [_py2java(sc, arg) for arg in args] return _java2py(sc, m(*java_args)) @staticmethod def _new_java_obj(java_class, *args): """ Returns a new Java object. """ sc = SparkContext._active_spark_context java_obj = _jvm() for name in java_class.split("."): java_obj = getattr(java_obj, name) java_args = [_py2java(sc, arg) for arg in args] return java_obj(*java_args) @staticmethod def _new_java_array(pylist, java_class): """ Create a Java array of given java_class type. Useful for calling a method with a Scala Array from Python with Py4J. :param pylist: Python list to convert to a Java Array. :param java_class: Java class to specify the type of Array. Should be in the form of sc._gateway.jvm.* (sc is a valid Spark Context). :return: Java Array of converted pylist. Example primitive Java classes: - basestring -> sc._gateway.jvm.java.lang.String - int -> sc._gateway.jvm.java.lang.Integer - float -> sc._gateway.jvm.java.lang.Double - bool -> sc._gateway.jvm.java.lang.Boolean """ sc = SparkContext._active_spark_context java_array = sc._gateway.new_array(java_class, len(pylist)) for i in xrange(len(pylist)): java_array[i] = pylist[i] return java_array @inherit_doc class JavaParams(JavaWrapper, Params): """ Utility class to help create wrapper classes from Java/Scala implementations of pipeline components. """ #: The param values in the Java object should be #: synced with the Python wrapper in fit/transform/evaluate/copy. __metaclass__ = ABCMeta def __del__(self): if SparkContext._active_spark_context: SparkContext._active_spark_context._gateway.detach(self._java_obj) def _make_java_param_pair(self, param, value): """ Makes a Java param pair. 
""" sc = SparkContext._active_spark_context param = self._resolveParam(param) java_param = self._java_obj.getParam(param.name) java_value = _py2java(sc, value) return java_param.w(java_value) def _transfer_params_to_java(self): """ Transforms the embedded params to the companion Java object. """ paramMap = self.extractParamMap() for param in self.params: if param in paramMap: pair = self._make_java_param_pair(param, paramMap[param]) self._java_obj.set(pair) def _transfer_param_map_to_java(self, pyParamMap): """ Transforms a Python ParamMap into a Java ParamMap. """ paramMap = JavaWrapper._new_java_obj("org.apache.spark.ml.param.ParamMap") for param in self.params: if param in pyParamMap: pair = self._make_java_param_pair(param, pyParamMap[param]) paramMap.put([pair]) return paramMap def _create_params_from_java(self): """ SPARK-10931: Temporary fix to create params that are defined in the Java obj but not here """ java_params = list(self._java_obj.params()) from pyspark.ml.param import Param for java_param in java_params: java_param_name = java_param.name() if not hasattr(self, java_param_name): param = Param(self, java_param_name, java_param.doc()) setattr(param, "created_from_java_param", True) setattr(self, java_param_name, param) self._params = None # need to reset so self.params will discover new params def _transfer_params_from_java(self): """ Transforms the embedded params from the companion Java object. """ sc = SparkContext._active_spark_context for param in self.params: if self._java_obj.hasParam(param.name): java_param = self._java_obj.getParam(param.name) # SPARK-14931: Only check set params back to avoid default params mismatch. if self._java_obj.isSet(java_param): value = _java2py(sc, self._java_obj.getOrDefault(java_param)) self._set(**{param.name: value}) # SPARK-10931: Temporary fix for params that have a default in Java if self._java_obj.hasDefault(java_param) and not self.isDefined(param): value = _java2py(sc, self._java_obj.getDefault(java_param)).get() self._setDefault(**{param.name: value}) def _transfer_param_map_from_java(self, javaParamMap): """ Transforms a Java ParamMap into a Python ParamMap. """ sc = SparkContext._active_spark_context paramMap = dict() for pair in javaParamMap.toList(): param = pair.param() if self.hasParam(str(param.name())): paramMap[self.getParam(param.name())] = _java2py(sc, pair.value()) return paramMap @staticmethod def _empty_java_param_map(): """ Returns an empty Java ParamMap reference. """ return _jvm().org.apache.spark.ml.param.ParamMap() def _to_java(self): """ Transfer this instance's Params to the wrapped Java object, and return the Java object. Used for ML persistence. Meta-algorithms such as Pipeline should override this method. :return: Java object equivalent to this instance. """ self._transfer_params_to_java() return self._java_obj @staticmethod def _from_java(java_stage): """ Given a Java object, create and return a Python wrapper of it. Used for ML persistence. Meta-algorithms such as Pipeline should override this method as a classmethod. """ def __get_class(clazz): """ Loads Python class from its name. """ parts = clazz.split('.') module = ".".join(parts[:-1]) m = __import__(module) for comp in parts[1:]: m = getattr(m, comp) return m stage_name = java_stage.getClass().getName().replace("org.apache.spark", "pyspark") # Generate a default new instance from the stage_name class. py_type = __get_class(stage_name) if issubclass(py_type, JavaParams): # Load information from java_stage to the instance. 
py_stage = py_type() py_stage._java_obj = java_stage # SPARK-10931: Temporary fix so that persisted models would own params from Estimator if issubclass(py_type, JavaModel): py_stage._create_params_from_java() py_stage._resetUid(java_stage.uid()) py_stage._transfer_params_from_java() elif hasattr(py_type, "_from_java"): py_stage = py_type._from_java(java_stage) else: raise NotImplementedError("This Java stage cannot be loaded into Python currently: %r" % stage_name) return py_stage def copy(self, extra=None): """ Creates a copy of this instance with the same uid and some extra params. This implementation first calls Params.copy and then make a copy of the companion Java pipeline component with extra params. So both the Python wrapper and the Java pipeline component get copied. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() that = super(JavaParams, self).copy(extra) if self._java_obj is not None: that._java_obj = self._java_obj.copy(self._empty_java_param_map()) that._transfer_params_to_java() return that @inherit_doc class JavaEstimator(JavaParams, Estimator): """ Base class for :py:class:`Estimator`s that wrap Java/Scala implementations. """ __metaclass__ = ABCMeta @abstractmethod def _create_model(self, java_model): """ Creates a model from the input Java model reference. """ raise NotImplementedError() def _fit_java(self, dataset): """ Fits a Java model to the input dataset. :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame` :param params: additional params (overwriting embedded values) :return: fitted Java model """ self._transfer_params_to_java() return self._java_obj.fit(dataset._jdf) def _fit(self, dataset): java_model = self._fit_java(dataset) model = self._create_model(java_model) return self._copyValues(model) @inherit_doc class JavaTransformer(JavaParams, Transformer): """ Base class for :py:class:`Transformer`s that wrap Java/Scala implementations. Subclasses should ensure they have the transformer Java object available as _java_obj. """ __metaclass__ = ABCMeta def _transform(self, dataset): self._transfer_params_to_java() return DataFrame(self._java_obj.transform(dataset._jdf), dataset.sql_ctx) @inherit_doc class JavaModel(JavaTransformer, Model): """ Base class for :py:class:`Model`s that wrap Java/Scala implementations. Subclasses should inherit this class before param mix-ins, because this sets the UID from the Java model. """ __metaclass__ = ABCMeta def __init__(self, java_model=None): """ Initialize this instance with a Java model object. Subclasses should call this constructor, initialize params, and then call _transfer_params_from_java. This instance can be instantiated without specifying java_model, it will be assigned after that, but this scenario only used by :py:class:`JavaMLReader` to load models. This is a bit of a hack, but it is easiest since a proper fix would require MLReader (in pyspark.ml.util) to depend on these wrappers, but these wrappers depend on pyspark.ml.util (both directly and via other ML classes). """ super(JavaModel, self).__init__(java_model) if java_model is not None: # SPARK-10931: This is a temporary fix to allow models to own params # from estimators. Eventually, these params should be in models through # using common base classes between estimators and models. self._create_params_from_java() self._resetUid(java_model.uid())
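
# --- Hedged usage sketch ---------------------------------------------------------------
# JavaWrapper._new_java_array (documented above) builds a typed Java array for Py4J
# calls that expect a Java/Scala Array. The sketch assumes a live SparkContext; the
# values are illustrative only.
if __name__ == "__main__":
    from pyspark import SparkContext

    sc = SparkContext.getOrCreate()
    # java.lang.String[] built from a Python list, as described in the docstring.
    java_strings = JavaWrapper._new_java_array(
        ["a", "b", "c"], sc._gateway.jvm.java.lang.String)
    print(java_strings[0])
    sc.stop()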
# -*- coding:utf-8 -*- """ driver class management """ from sfa.util.sfalogging import logger from sfa.util.xrn import Xrn, urn_to_hrn from sfa.rspecs.version_manager import VersionManager from sfa.rspecs.rspec import RSpec from sfa.managers.driver import Driver from sfa.iotlab.iotlabshell import IotLABShell from sfa.iotlab.iotlabaggregate import IotLABAggregate from sfa.iotlab.iotlablease import LeaseTable class IotLabDriver(Driver): """ SFA driver for Iot-LAB testbed """ def __init__(self, api): Driver.__init__(self, api) config = api.config self.api = api self.root_auth = config.SFA_REGISTRY_ROOT_AUTH self.shell = IotLABShell() # need by sfa driver self.cache = None def check_sliver_credentials(self, creds, urns): """ Not used and need by SFA """ pass ######################################## ########## registry oriented ######################################## ########## def register(self, sfa_record, hrn, pub_key): logger.warning("iotlabdriver register : not implemented") return -1 ########## def update(self, old_sfa_record, new_sfa_record, hrn, new_key): logger.warning("iotlabdriver update : not implemented") return True ########## def remove(self, sfa_record): logger.warning("iotlabdriver remove : not implemented") return True ######################################## ########## aggregate oriented ######################################## def provision(self, urns, options=None): logger.warning("iotlabdriver provision : not implemented") version_manager = VersionManager() opt = options['geni_rspec_version'] rspec_version = version_manager.get_version(opt) return self.describe(urns, rspec_version, options=options) def delete(self, urns, options=None): logger.warning("iotlabdriver delete : not implemented") geni_slivers = [] return geni_slivers def aggregate_version(self): logger.warning("iotlabdriver aggregate_version") version_manager = VersionManager() ad_rspec_versions = [] request_rspec_versions = [] for rspec_version in version_manager.versions: if rspec_version.content_type in ['*', 'ad']: ad_rspec_versions.append(rspec_version.to_dict()) if rspec_version.content_type in ['*', 'request']: request_rspec_versions.append(rspec_version.to_dict()) return { 'testbed': self.hrn, 'geni_request_rspec_versions': request_rspec_versions, 'geni_ad_rspec_versions': ad_rspec_versions} def list_resources(self, version=None, options=None): logger.warning("iotlabdriver list_resources") if not options: options = {} aggregate = IotLABAggregate(self) rspec = aggregate.list_resources(version=version, options=options) return rspec def describe(self, urns, version, options=None): logger.warning("iotlabdriver describe") if not options: options = {} aggregate = IotLABAggregate(self) return aggregate.describe(urns, version=version, options=options) def status(self, urns, options=None): logger.warning("iotlabdriver status") aggregate = IotLABAggregate(self) desc = aggregate.describe(urns, version='GENI 3') status = {'geni_urn': desc['geni_urn'], 'geni_slivers': desc['geni_slivers']} return status def _get_users(self): """ Get all users """ ret = self.shell.get_users() if 'error' in ret: return None return ret def _get_user_login(self, caller_user): """ Get user login with email """ email = caller_user['email'] # ensure user exist in LDAP tree users = self._get_users() if users and not email in users: self.shell.add_user(caller_user) users = self._get_users() if users and email in users: return users[email]['login'] else: return None @classmethod def _get_experiment(cls, rspec): """ Find in RSpec leases the 
experiment start time, duration and nodes list. :Example: <rspec> ... <lease slice_id="urn:publicid:IDN+onelab:inria+slice+test_iotlab" start_time="1427792400" duration="30"> <node component_id= "urn:publicid:IDN+iotlab+node+m3-10.grenoble.iot-lab.info"/> </lease> <lease slice_id="urn:publicid:IDN+onelab:inria+slice+test_iotlab" start_time="1427792600" duration="50"> <node component_id= "urn:publicid:IDN+iotlab+node+m3-15.grenoble.iot-lab.info"/> </lease> ... </rspec> """ leases = rspec.version.get_leases() start_time = min([int(lease['start_time']) for lease in leases]) end_time = max([int(lease['start_time']) + int(lease['duration'])*60 for lease in leases]) nodes_list = [Xrn.unescape(Xrn(lease['component_id'].strip(), type='node').get_leaf()) for lease in leases] # uniq hostnames nodes_list = list(set(nodes_list)) from math import floor duration = floor((end_time - start_time)/60) # minutes return nodes_list, start_time, duration def _save_db_lease(self, job_id, slice_hrn): """ Save lease table row in SFA database """ lease_row = LeaseTable(job_id, slice_hrn) logger.warning("iotlabdriver _save_db_lease lease row : %s" % lease_row) self.api.dbsession().add(lease_row) self.api.dbsession().commit() def allocate(self, urn, rspec_string, expiration, options=None): """ Allocate method submit an experiment on Iot-LAB testbed with : * user : get the slice user which launch request (caller_hrn) * reservation : get the start time and duration in RSpec leases * nodes : get the nodes list in RSpec leases If we have a request success on Iot-LAB testbed we store in SFA database the assocation OAR scheduler job id and slice hrn :param urn : slice urn :param rspec_string : RSpec received :param options : options with slice users (geni_users) """ # pylint:disable=R0914 logger.warning("iotlabdriver allocate") xrn = Xrn(urn) aggregate = IotLABAggregate(self) # parse rspec rspec = RSpec(rspec_string) caller_hrn = options.get('actual_caller_hrn', []) geni_users = options.get('geni_users', []) caller_user = [user for user in geni_users if urn_to_hrn(user['urn'])[0] == caller_hrn][0] logger.warning("iotlabdriver allocate caller : %s" % caller_user['email']) login = self._get_user_login(caller_user) # only if we have a user if login: nodes_list, start_time, duration = \ self._get_experiment(rspec) logger.warning("iotlabdriver allocate submit OAR job :" " %s %s %s %s" % (xrn.hrn, start_time, duration, nodes_list)) # [0-9A-Za-z_] with onelab.inria.test_iotlab exp_name = '_'.join((xrn.hrn).split('.')) # submit OAR job ret = self.shell.reserve_nodes(login, exp_name, nodes_list, start_time, duration) # in case of job submission success save slice and lease job # id association in database if 'id' in ret: self._save_db_lease(int(ret['id']), xrn.hrn) return aggregate.describe([xrn.get_urn()], version=rspec.version)
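
# --- Hedged standalone sketch -----------------------------------------------------------
# _get_experiment() above reduces the RSpec leases to a single OAR reservation:
# earliest start_time, latest start_time + duration (duration is given in minutes),
# and the de-duplicated node list. The dicts below mirror the shape returned by
# rspec.version.get_leases(); values are taken from the docstring example, and the
# hostname extraction is simplified (the driver itself goes through Xrn).
if __name__ == "__main__":
    from math import floor

    leases = [
        {"component_id": "urn:publicid:IDN+iotlab+node+m3-10.grenoble.iot-lab.info",
         "start_time": "1427792400", "duration": "30"},
        {"component_id": "urn:publicid:IDN+iotlab+node+m3-15.grenoble.iot-lab.info",
         "start_time": "1427792600", "duration": "50"},
    ]
    start_time = min(int(lease["start_time"]) for lease in leases)
    end_time = max(int(lease["start_time"]) + int(lease["duration"]) * 60
                   for lease in leases)
    nodes_list = sorted({lease["component_id"].split("+node+")[-1] for lease in leases})
    duration = floor((end_time - start_time) / 60)
    print("nodes=%s start=%s duration=%s min" % (nodes_list, start_time, duration))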
import logging from collections import defaultdict import networkx import claripy from ..sim_type import SimType, SimTypePointer, SimTypeChar, SimTypeString, SimTypeReg from ..calling_conventions import DEFAULT_CC from ..knowledge_base import KnowledgeBase from ..errors import AngrDirectorError from . import ExplorationTechnique l = logging.getLogger("angr.exploration_techniques.director") class BaseGoal(object): REQUIRE_CFG_STATES = False def __init__(self, sort): self.sort = sort def __repr__(self): return "<TargetCondition %s>" % self.sort # # Public methods # def check(self, cfg, state, peek_blocks): """ :param angr.analyses.CFGAccurate cfg: An instance of CFGAccurate. :param angr.SimState state: The state to check. :param int peek_blocks: Number of blocks to peek ahead from the current point. :return: True if we can determine that this condition is definitely satisfiable if the path is taken, False otherwise. :rtype: bool """ raise NotImplementedError() def check_state(self, state): """ Check if the current state satisfies the goal. :param angr.SimState state: The state to check. :return: True if it satisfies the goal, False otherwise. :rtype: bool """ raise NotImplementedError() # # Private methods # @staticmethod def _get_cfg_node(cfg, state): """ Get the CFGNode object on the control flow graph given an angr state. :param angr.analyses.CFGAccurate cfg: An instance of CFGAccurate. :param angr.SimState state: The current state. :return: A CFGNode instance if the node exists, or None if the node cannot be found. :rtype: CFGNode or None """ call_stack_suffix = state.callstack.stack_suffix(cfg.context_sensitivity_level) is_syscall = state.history.jumpkind is not None and state.history.jumpkind.startswith('Ijk_Sys') block_id = cfg._generate_block_id(call_stack_suffix, state.addr, is_syscall) #if cfg.get_node(block_id) is None: # import ipdb; ipdb.set_trace() return cfg.get_node(block_id) @staticmethod def _dfs_edges(graph, source, max_steps=None): """ Perform a depth-first search on the given DiGraph, with a limit on maximum steps. :param networkx.DiGraph graph: The graph to traverse. :param Any source: The source to begin traversal. :param int max_steps: Maximum steps of the traversal, or None if not limiting steps. :return: An iterator of edges. """ if max_steps is None: yield networkx.dfs_edges(graph, source) else: steps_map = defaultdict(int) traversed = { source } stack = [ source ] while stack: src = stack.pop() for dst in graph.successors(src): if dst in traversed: continue traversed.add(dst) dst_steps = max(steps_map[src] + 1, steps_map[dst]) if dst_steps > max_steps: continue yield src, dst steps_map[dst] = dst_steps stack.append(dst) class ExecuteAddressGoal(BaseGoal): """ A goal that prioritizes states reaching (or are likely to reach) certain address in some specific steps. """ def __init__(self, addr): super(ExecuteAddressGoal, self).__init__('execute_address') self.addr = addr def __repr__(self): return "<ExecuteAddressCondition targeting %#x>" % self.addr def check(self, cfg, state, peek_blocks): """ Check if the specified address will be executed :param cfg: :param state: :param int peek_blocks: :return: :rtype: bool """ # Get the current CFGNode from the CFG node = self._get_cfg_node(cfg, state) if node is None: # Umm it doesn't exist on the control flow graph - why? 
l.error('Failed to find CFGNode for state %s on the control flow graph.', state) return False # crawl the graph to see if we can reach the target address next for src, dst in self._dfs_edges(cfg.graph, node, max_steps=peek_blocks): if src.addr == self.addr or dst.addr == self.addr: l.debug("State %s will reach %#x.", state, self.addr) return True l.debug('SimState %s will not reach %#x.', state, self.addr) return False def check_state(self, state): """ Check if the current address is the target address. :param angr.SimState state: The state to check. :return: True if the current address is the target address, False otherwise. :rtype: bool """ return state.addr == self.addr class CallFunctionGoal(BaseGoal): """ A goal that prioritizes states reaching certain function, and optionally with specific arguments. Note that constraints on arguments (and on function address as well) have to be identifiable on an accurate CFG. For example, you may have a CallFunctionGoal saying "call printf with the first argument being 'Hello, world'", and CFGAccurate must be able to figure our the first argument to printf is in fact "Hello, world", not some symbolic strings that will be constrained to "Hello, world" during symbolic execution (or simulation, however you put it). """ REQUIRE_CFG_STATES = True def __init__(self, function, arguments): super(CallFunctionGoal, self).__init__('function_call') self.function = function self.arguments = arguments if self.arguments is not None: for arg in self.arguments: if arg is not None: if len(arg) != 2: raise AngrDirectorError('Each argument must be either None or a 2-tuple contains argument ' + 'type and the expected value.' ) arg_type, expected_value = arg if not isinstance(arg_type, SimType): raise AngrDirectorError('Each argument type must be an instance of SimType.') if isinstance(expected_value, claripy.ast.Base) and expected_value.symbolic: raise AngrDirectorError('Symbolic arguments are not supported.') # TODO: allow user to provide an optional argument processor to process arguments def __repr__(self): return "<FunctionCallCondition over %s>" % self.function def check(self, cfg, state, peek_blocks): """ Check if the specified function will be reached with certain arguments. :param cfg: :param state: :param peek_blocks: :return: """ # Get the current CFGNode node = self._get_cfg_node(cfg, state) if node is None: l.error("Failed to find CFGNode for state %s on the control flow graph.", state) return False # crawl the graph to see if we can reach the target function within the limited steps for src, dst in self._dfs_edges(cfg.graph, node, max_steps=peek_blocks): the_node = None if src.addr == self.function.addr: the_node = src elif dst.addr == self.function.addr: the_node = dst if the_node is not None: if self.arguments is None: # we do not care about arguments return True else: # check arguments arch = state.arch state = the_node.input_state same_arguments = self._check_arguments(arch, state) if same_arguments: # all arguments are the same! return True l.debug("SimState %s will not reach function %s.", state, self.function) return False def check_state(self, state): """ Check if the specific function is reached with certain arguments :param angr.SimState state: The state to check :return: True if the function is reached with certain arguments, False otherwise. 
:rtype: bool """ if state.addr == self.function.addr: arch = state.arch if self._check_arguments(arch, state): return True return False # # Private methods # def _check_arguments(self, arch, state): # TODO: add calling convention detection to individual functions, and use that instead of the # TODO: default calling convention of the platform cc = DEFAULT_CC[arch.name](arch) # type: s_cc.SimCC for i, expected_arg in enumerate(self.arguments): if expected_arg is None: continue real_arg = cc.arg(state, i) expected_arg_type, expected_arg_value = expected_arg r = self._compare_arguments(state, expected_arg_type, expected_arg_value, real_arg) if not r: return False return True @staticmethod def _compare_arguments(state, arg_type, expected_value, real_value): """ :param SimState state: :param simvuex.s_type.SimType arg_type: :param claripy.ast.Base expected_value: :param claripy.ast.Base real_value: :return: :rtype: bool """ if real_value.symbolic: # we do not support symbolic arguments yet return False if isinstance(arg_type, SimTypePointer): # resolve the pointer and compare the content points_to_type = arg_type.pts_to if isinstance(points_to_type, SimTypeChar): # char * # perform a concrete string comparison ptr = real_value return CallFunctionGoal._compare_pointer_content(state, ptr, expected_value) else: l.error('Unsupported argument type %s in _compare_arguments(). Please bug Fish to implement.', arg_type) elif isinstance(arg_type, SimTypeString): # resolve the pointer and compare the content ptr = real_value return CallFunctionGoal._compare_pointer_content(state, ptr, expected_value) elif isinstance(arg_type, SimTypeReg): # directly compare the numbers return CallFunctionGoal._compare_integer_content(state, real_value, expected_value) else: l.error('Unsupported argument type %s in _compare_arguments(). Please bug Fish to implement.', arg_type) return False @staticmethod def _compare_pointer_content(state, ptr, expected): if isinstance(expected, str): # convert it to an AST expected = state.se.BVV(expected) length = expected.size() / 8 real_string = state.memory.load(ptr, length, endness='Iend_BE') if real_string.symbolic: # we do not support symbolic arguments return False return state.se.eval(real_string) == state.se.eval(expected) @staticmethod def _compare_integer_content(state, val, expected): # note that size difference does not matter - we only compare their concrete values if isinstance(val, claripy.ast.Base) and val.symbolic: # we do not support symboli arguments return False return state.se.eval(val) == state.se.eval(expected) class Director(ExplorationTechnique): """ An exploration technique for directed symbolic execution. A control flow graph (using CFGAccurate) is built and refined during symbolic execution. Each time the execution reaches a block that is outside of the CFG, the CFG recovery will be triggered with that state, with a maximum recovery depth (100 by default). If we see a basic block during state stepping that is not yet in the control flow graph, we go back to control flow graph recovery and "peek" more blocks forward. When stepping a simulation manager, all states are categorized into three different categories: - Might reach the destination within the peek depth. Those states are prioritized. - Will not reach the destination within the peek depth. Those states are de-prioritized. However, there is a little chance for those states to be explored as well in order to prevent over-fitting. 
""" def __init__(self, peek_blocks=100, peek_functions=5, goals=None, cfg_keep_states=False, goal_satisfied_callback=None, num_fallback_states=5): """ Constructor. """ super(Director, self).__init__() self._peek_blocks = peek_blocks self._peek_functions = peek_functions self._goals = goals if goals is not None else [ ] self._cfg_keep_states = cfg_keep_states self._goal_satisfied_callback = goal_satisfied_callback self._num_fallback_states = num_fallback_states self._cfg = None self._cfg_kb = None def step(self, pg, stash, **kwargs): """ :param pg: :param stash: :param kwargs: :return: """ # make sure all current blocks are in the CFG self._peek_forward(pg) # categorize all states in the simulation manager self._categorize_states(pg) if not pg.active: # active states are empty - none of our existing states will reach the target for sure self._load_fallback_states(pg) if pg.active: # step all active states forward pg._one_step(stash) if not pg.active: self._load_fallback_states(pg) return pg def add_goal(self, goal): """ Add a goal. :param BaseGoal goal: The goal to add. :return: None """ self._goals.append(goal) # # Private methods # def _peek_forward(self, pg): """ Make sure all current basic block on each state shows up in the CFG. For blocks that are not in the CFG, start CFG recovery from them with a maximum basic block depth of 100. :param pg: :return: """ if self._cfg is None: starts = list(pg.active) self._cfg_kb = KnowledgeBase(self.project, self.project.loader.main_object) self._cfg = self.project.analyses.CFGAccurate(kb=self._cfg_kb, starts=starts, max_steps=self._peek_blocks, keep_state=self._cfg_keep_states ) else: starts = list(pg.active) self._cfg.resume(starts=starts, max_steps=self._peek_blocks) def _load_fallback_states(self, pg): """ Load the last N deprioritized states will be extracted from the "deprioritized" stash and put to "active" stash. N is controlled by 'num_fallback_states'. :param SimulationManager pg: The simulation manager. :return: None """ # take back some of the deprioritized states l.debug("No more active states. Load some deprioritized states to 'active' stash.") if 'deprioritized' in pg.stashes and pg.deprioritized: pg.active.extend(pg.deprioritized[-self._num_fallback_states : ]) pg.stashes['deprioritized'] = pg.deprioritized[ : -self._num_fallback_states] def _categorize_states(self, pg): """ Categorize all states into two different groups: reaching the destination within the peek depth, and not reaching the destination within the peek depth. :param SimulationManager pg: The simulation manager that contains states. All active states (state belonging to "active" stash) are subjected to categorization. :return: The categorized simulation manager. 
        :rtype: angr.SimulationManager
        """

        past_active_states = len(pg.active)
        # past_deprioritized_states = len(pg.deprioritized)

        for goal in self._goals:
            for p in pg.active:
                if self._check_goals(goal, p):
                    if self._goal_satisfied_callback is not None:
                        self._goal_satisfied_callback(goal, p, pg)

        pg.stash(
            filter_func=lambda p: all(
                not goal.check(self._cfg, p, peek_blocks=self._peek_blocks) for goal in self._goals
            ),
            from_stash='active',
            to_stash='deprioritized',
        )

        if pg.active:
            # TODO: pick some states from depriorized stash to active stash to avoid overfitting
            pass

        active_states = len(pg.active)
        # deprioritized_states = len(pg.deprioritized)

        l.debug('%d/%d active states are deprioritized.',
                past_active_states - active_states, past_active_states)

        return pg

    def _check_goals(self, goal, state):  # pylint:disable=no-self-use
        """
        Check if the state is satisfying the goal.

        :param BaseGoal goal: The goal to check against.
        :param angr.SimState state: The state to check.
        :return: True if the state satisfies the goal currently, False otherwise.
        :rtype: bool
        """

        return goal.check_state(state)
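
# --- Hedged usage sketch -----------------------------------------------------------------
# Typical way to attach the Director technique defined above to a simulation manager,
# assuming the usual angr project API; the binary path and the target address are
# hypothetical placeholders.
if __name__ == "__main__":
    import angr

    proj = angr.Project("/tmp/example_binary", auto_load_libs=False)
    simgr = proj.factory.simulation_manager(proj.factory.entry_state())

    director = Director(peek_blocks=100)
    director.add_goal(ExecuteAddressGoal(0x400b60))

    simgr.use_technique(director)
    simgr.run()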
#!/usr/bin/env python3 # # This file is part of LiteX-Boards. # # Copyright (c) Greg Davill <greg.davill@gmail.com> # SPDX-License-Identifier: BSD-2-Clause import os import sys import argparse from migen import * from migen.genlib.misc import WaitTimer from migen.genlib.resetsync import AsyncResetSynchronizer from litex_boards.platforms import orangecrab from litex.build.lattice.trellis import trellis_args, trellis_argdict from litex.soc.cores.clock import * from litex.soc.integration.soc_core import * from litex.soc.integration.builder import * from litex.soc.cores.led import LedChaser from litedram.modules import MT41K64M16, MT41K128M16, MT41K256M16, MT41K512M16 from litedram.phy import ECP5DDRPHY # CRG --------------------------------------------------------------------------------------------- class _CRG(Module): def __init__(self, platform, sys_clk_freq, with_usb_pll=False): self.rst = Signal() self.clock_domains.cd_por = ClockDomain(reset_less=True) self.clock_domains.cd_sys = ClockDomain() # # # # Clk / Rst clk48 = platform.request("clk48") rst_n = platform.request("usr_btn", loose=True) if rst_n is None: rst_n = 1 # Power on reset por_count = Signal(16, reset=2**16-1) por_done = Signal() self.comb += self.cd_por.clk.eq(clk48) self.comb += por_done.eq(por_count == 0) self.sync.por += If(~por_done, por_count.eq(por_count - 1)) # PLL self.submodules.pll = pll = ECP5PLL() self.comb += pll.reset.eq(~por_done | ~rst_n | self.rst) pll.register_clkin(clk48, 48e6) pll.create_clkout(self.cd_sys, sys_clk_freq) # USB PLL if with_usb_pll: self.clock_domains.cd_usb_12 = ClockDomain() self.clock_domains.cd_usb_48 = ClockDomain() usb_pll = ECP5PLL() self.submodules += usb_pll self.comb += usb_pll.reset.eq(~por_done) usb_pll.register_clkin(clk48, 48e6) usb_pll.create_clkout(self.cd_usb_48, 48e6) usb_pll.create_clkout(self.cd_usb_12, 12e6) # FPGA Reset (press usr_btn for 1 second to fallback to bootloader) reset_timer = WaitTimer(int(48e6)) reset_timer = ClockDomainsRenamer("por")(reset_timer) self.submodules += reset_timer self.comb += reset_timer.wait.eq(~rst_n) self.comb += platform.request("rst_n").eq(~reset_timer.done) class _CRGSDRAM(Module): def __init__(self, platform, sys_clk_freq, with_usb_pll=False): self.rst = Signal() self.clock_domains.cd_init = ClockDomain() self.clock_domains.cd_por = ClockDomain(reset_less=True) self.clock_domains.cd_sys = ClockDomain() self.clock_domains.cd_sys2x = ClockDomain() self.clock_domains.cd_sys2x_i = ClockDomain(reset_less=True) # # # self.stop = Signal() self.reset = Signal() # Clk / Rst clk48 = platform.request("clk48") rst_n = platform.request("usr_btn", loose=True) if rst_n is None: rst_n = 1 # Power on reset por_count = Signal(16, reset=2**16-1) por_done = Signal() self.comb += self.cd_por.clk.eq(clk48) self.comb += por_done.eq(por_count == 0) self.sync.por += If(~por_done, por_count.eq(por_count - 1)) # PLL sys2x_clk_ecsout = Signal() self.submodules.pll = pll = ECP5PLL() self.comb += pll.reset.eq(~por_done | ~rst_n | self.rst) pll.register_clkin(clk48, 48e6) pll.create_clkout(self.cd_sys2x_i, 2*sys_clk_freq) pll.create_clkout(self.cd_init, 24e6) self.specials += [ Instance("ECLKBRIDGECS", i_CLK0 = self.cd_sys2x_i.clk, i_SEL = 0, o_ECSOUT = sys2x_clk_ecsout), Instance("ECLKSYNCB", i_ECLKI = sys2x_clk_ecsout, i_STOP = self.stop, o_ECLKO = self.cd_sys2x.clk), Instance("CLKDIVF", p_DIV = "2.0", i_ALIGNWD = 0, i_CLKI = self.cd_sys2x.clk, i_RST = self.reset, o_CDIVX = self.cd_sys.clk), AsyncResetSynchronizer(self.cd_sys, ~pll.locked | self.reset), 
AsyncResetSynchronizer(self.cd_sys2x, ~pll.locked | self.reset), ] # USB PLL if with_usb_pll: self.clock_domains.cd_usb_12 = ClockDomain() self.clock_domains.cd_usb_48 = ClockDomain() usb_pll = ECP5PLL() self.submodules += usb_pll self.comb += usb_pll.reset.eq(~por_done) usb_pll.register_clkin(clk48, 48e6) usb_pll.create_clkout(self.cd_usb_48, 48e6) usb_pll.create_clkout(self.cd_usb_12, 12e6) # FPGA Reset (press usr_btn for 1 second to fallback to bootloader) reset_timer = WaitTimer(int(48e6)) reset_timer = ClockDomainsRenamer("por")(reset_timer) self.submodules += reset_timer self.comb += reset_timer.wait.eq(~rst_n) self.comb += platform.request("rst_n").eq(~reset_timer.done) # BaseSoC ------------------------------------------------------------------------------------------ class BaseSoC(SoCCore): def __init__(self, revision="0.2", device="25F", sdram_device="MT41K64M16", sys_clk_freq=int(48e6), toolchain="trellis", with_led_chaser=True, **kwargs): platform = orangecrab.Platform(revision=revision, device=device ,toolchain=toolchain) # Serial ----------------------------------------------------------------------------------- if kwargs["uart_name"] in ["serial", "usb_acm"]: kwargs["uart_name"] = "usb_acm" # Defaults to USB ACM through ValentyUSB. os.system("git clone https://github.com/litex-hub/valentyusb -b hw_cdc_eptri") sys.path.append("valentyusb") # SoCCore ---------------------------------------------------------------------------------- SoCCore.__init__(self, platform, sys_clk_freq, ident = "LiteX SoC on OrangeCrab", **kwargs) # CRG -------------------------------------------------------------------------------------- with_usb_pll = kwargs.get("uart_name", None) == "usb_acm" crg_cls = _CRGSDRAM if not self.integrated_main_ram_size else _CRG self.submodules.crg = crg_cls(platform, sys_clk_freq, with_usb_pll) # DDR3 SDRAM ------------------------------------------------------------------------------- if not self.integrated_main_ram_size: available_sdram_modules = { "MT41K64M16": MT41K64M16, "MT41K128M16": MT41K128M16, "MT41K256M16": MT41K256M16, "MT41K512M16": MT41K512M16, } sdram_module = available_sdram_modules.get(sdram_device) ddram_pads = platform.request("ddram") self.submodules.ddrphy = ECP5DDRPHY( pads = ddram_pads, sys_clk_freq = sys_clk_freq, cmd_delay = 0 if sys_clk_freq > 64e6 else 100) self.ddrphy.settings.rtt_nom = "disabled" if hasattr(ddram_pads, "vccio"): self.comb += ddram_pads.vccio.eq(0b111111) if hasattr(ddram_pads, "gnd"): self.comb += ddram_pads.gnd.eq(0) self.comb += self.crg.stop.eq(self.ddrphy.init.stop) self.comb += self.crg.reset.eq(self.ddrphy.init.reset) self.add_sdram("sdram", phy = self.ddrphy, module = sdram_module(sys_clk_freq, "1:2"), l2_cache_size = kwargs.get("l2_size", 8192) ) # Leds ------------------------------------------------------------------------------------- if with_led_chaser: self.submodules.leds = LedChaser( pads = platform.request_all("user_led"), sys_clk_freq = sys_clk_freq) # Build -------------------------------------------------------------------------------------------- def main(): parser = argparse.ArgumentParser(description="LiteX SoC on OrangeCrab") parser.add_argument("--build", action="store_true", help="Build bitstream.") parser.add_argument("--load", action="store_true", help="Load bitstream.") parser.add_argument("--toolchain", default="trellis", help="FPGA toolchain (trellis or diamond).") parser.add_argument("--sys-clk-freq", default=48e6, help="System clock frequency.") parser.add_argument("--revision", 
default="0.2", help="Board Revision (0.1 or 0.2).") parser.add_argument("--device", default="25F", help="ECP5 device (25F, 45F or 85F).") parser.add_argument("--sdram-device", default="MT41K64M16", help="SDRAM device (MT41K64M16, MT41K128M16, MT41K256M16 or MT41K512M16).") parser.add_argument("--with-spi-sdcard", action="store_true", help="Enable SPI-mode SDCard support.") builder_args(parser) soc_core_args(parser) trellis_args(parser) args = parser.parse_args() soc = BaseSoC( toolchain = args.toolchain, revision = args.revision, device = args.device, sdram_device = args.sdram_device, sys_clk_freq = int(float(args.sys_clk_freq)), **soc_core_argdict(args)) if args.with_spi_sdcard: soc.add_spi_sdcard() builder = Builder(soc, **builder_argdict(args)) builder_kargs = trellis_argdict(args) if args.toolchain == "trellis" else {} builder.build(**builder_kargs, run=args.build) if args.load: prog = soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit")) if __name__ == "__main__": main()
from __future__ import division import json import os import pickle import pprint from random import shuffle import numpy import patterny.semafor.frame as _frame from patterny.db import dao from patterny.config import Config from patterny.ml.similarity import ProblemSimilarity from patterny.semafor.adapter import SemaforClient from test_main import PatternyTest from utils import SCRIPT_FOLDER, KDE_DATASET from patterny.ml.tmodel import TopicModel from patterny.analysis.summary import ProblemAnalysisTextRankSummary from patterny.analysis.analyzer import BugAnalyses ids_xapian_search = [ 122437, 152671, 158633, 209415, 244091, 255384, 256397, 277464, 103266, 112958 ] ids_plasma_end_of_life = [ 243827, 186430, 343403, 194763, 255624, 310708, 251472, 300577, 275449, 285000, 320085, 272742, 311833, 63804, 63808, 314642 ] ids_install_valgrind = [ 356321, 349053, 326601, 331159, 319865, 330782, 344376, 325350 ] ids_multiple_screens = [ 341674, 343369, 340267, 343772, 344671, 310168 ] ids_indexing = [ 222494, 269260, 205126, 164308 ] class TestProblemAnalysis(PatternyTest): def setup(self): self.pp = pprint.PrettyPrinter(indent=2) self.base_url = 'https://bugs.kde.org/show_bug.cgi?id={}' with open(KDE_DATASET) as f: self.config = Config(f) self.client = SemaforClient(self.config) def teardown(self): pass @staticmethod def randomize(bugs): result = [] index_shuf = range(len(bugs)) shuffle(index_shuf) for idx in index_shuf: result.append(bugs[idx]) return result @staticmethod def X_y(bugs, i): test = list(bugs[i]) train = [] for j in xrange(0, len(bugs)): if j != i: train += bugs[j] return train, test def test_script(self): # Uncomment if you want to rebuild the bugs' summaries # bug_analyses_file = os.path.join(SCRIPT_FOLDER, 'bug_analysis_summarization.sav') # pa_summary = ProblemAnalysisTextRankSummary(self.config) # pa_summary.run(filter_empty_summaries=True) # bug_analyses = BugAnalyses(pa_summary.bugs) # pickle.dump(bug_analyses, open(bug_analyses_file, 'wb')) # n_topics = 10 # n_top_words = 25 # n_samples = 50 # n_features = 150 # iterations = 100 n_topics = 50 # should be 100 n_top_words = 25 n_samples = None n_features = 1500 iterations = 500 bug_analyses_file = os.path.join(SCRIPT_FOLDER, 'bug_analysis_summarization.sav') bug_analyses = pickle.load(open(bug_analyses_file, 'rb')) # TODO: look how to work with this guy later on semafor_frames_file = os.path.join(SCRIPT_FOLDER, 'semafor_new_analysis.json') with open(semafor_frames_file, 'rb') as data: bug_semafor_map = json.load(data) # # bug_analyses_file = os.path.join(SCRIPT_FOLDER, 'bug_analysis_new_summarization.sav') # bug_analyses = pickle.load(open(bug_analyses_file, 'rb')) # # similar_bug_analysis_file = os.path.join(SCRIPT_FOLDER, 'similar_bug_analysis_revised.json') # with open(similar_bug_analysis_file, 'rb') as data: # similar_bug_analysis = json.load(data) # # random_bug_file = os.path.join(SCRIPT_FOLDER, 'random_bug_list.sav') # bugs = pickle.load(open(random_bug_file, 'rb')) max_k = 10 bug_ids = bug_analyses.ids keys = ['ids_xapian_search', 'ids_plasma_end_of_life', 'ids_install_valgrind', 'ids_multiple_screens', 'ids_indexing'] # keys = ['ids_plasma_end_of_life', 'ids_install_valgrind', # 'ids_multiple_screens', 'ids_indexing'] keys = ['ids_xapian_search'] for i, key in enumerate(keys): print key exclude_ids = self.exclude(key) X_summarized_bugs = bug_analyses.filter([bug for bug in bug_ids if bug not in exclude_ids]) y_summarized_bugs = bug_analyses.filter([bug for bug in bug_ids if bug in exclude_ids]) tmodel = 
TopicModel(n_samples=n_samples, n_features=n_features, n_topics=n_topics, iterations=iterations, n_top_words=n_top_words, threshold=0.3) tmodel.build(X_summarized_bugs.analyses, debug=True, analyses=bug_analyses) model = ProblemSimilarity() model.build(X_summarized_bugs) # TODO: tmodel and check the topic of each one of the ids j_precision = [] for j in xrange(0, len(y_summarized_bugs.ids)): print 'Processing :: p{}'.format(j) try: bug_id = y_summarized_bugs.ids[j] problem = y_summarized_bugs.problems[j] analysis = y_summarized_bugs.analyses[j] analysis_sentences = y_summarized_bugs.analyses_sentences[j] recommended_similar_bugs = model.similarity(bug_id, problem, threshold=0.2) if recommended_similar_bugs and len(recommended_similar_bugs) > max_k: recommended_similar_bugs = recommended_similar_bugs[0:max_k] topics_for_similar_bugs = tmodel.topics_of( [s['id'] for s in recommended_similar_bugs] ) top_topics = self.find_top_topics(topics_for_similar_bugs, recommended_similar_bugs) if top_topics: self.metrics_for( bug_id, analysis, analysis_sentences, j_precision, top_topics, tmodel, bug_semafor_map, recommended_similar_bugs ) except Exception as ex: print '>> Error on :: p{} :: {}'.format(j, ex.message) print '\n\nDone evaluating {}'.format(key) if j_precision: precision = numpy.average(j_precision) print '>> {:4.4f}'.format(precision) print 'done' def metrics_for(self, bug_id, analysis, analysis_sentences, j_precision, top_topics, tmodel, bug_semafor_map, recommended_similar_bugs): t = next(iter(top_topics)) if t['topic'] in tmodel.topics: t_ids = tmodel.topics[t['topic']].ids t_analyses = tmodel.topics[t['topic']].analyses t_analyses_sentences = tmodel.topics[t['topic']].analyses_sentences _frames, _ids, _sentences = self.sentence_semantic_roles( bug_semafor_map, t_analyses_sentences, t_ids ) data = _frame.dataframe(_sentences, _frames, ids=_ids) topic_synthesized_analysis = _frame.synthesize(data) recommended = self.extract_recommended_analysis(topic_synthesized_analysis, recommended_similar_bugs) _aux_precision = self.precision_of(bug_id, analysis, analysis_sentences, recommended, bug_semafor_map) print _aux_precision j_precision.append(_aux_precision) def sentence_semantic_roles(self, bug_semafor_map, t_analyses_sentences, t_ids): _sentences = [] _frames = [] _ids = [] for w, _id in enumerate(t_ids): if str(str(_id)) in bug_semafor_map: semafor_frame = bug_semafor_map[str(_id)] current_frames = semafor_frame['frames'] if current_frames: partial_frames = [_frame.parse(frame) for frame in current_frames] _frames += partial_frames _sentences += t_analyses_sentences[w] _ids += [_id for _ in xrange(0, len(partial_frames))] return _frames, _ids, _sentences @staticmethod def extract_recommended_analysis(topic_synthesized_analysis, recommended_similar_bugs): result = [] bug_ids = [sb['id'] for sb in recommended_similar_bugs] for synthesized_analysis in topic_synthesized_analysis: if synthesized_analysis.contains_any(bug_ids): result.append(synthesized_analysis) return result @staticmethod def exclude(sample): # 15 if sample == 'ids_xapian_search': return [277464] elif sample == 'ids_plasma_end_of_life': return [194763, 251472, 285000, 63804, 314642] # return [63804, 314642] elif sample == 'ids_install_valgrind': return [356321, 331159, 344376] elif sample == 'ids_multiple_screens': return [341674, 343772, 310168] elif sample == 'ids_indexing': return [222494, 205126] else: return [] @staticmethod def similar_bugs(sample): if sample == 'ids_xapian_search': return ids_xapian_search elif sample 
== 'ids_plasma_end_of_life': return ids_plasma_end_of_life elif sample == 'ids_install_valgrind': return ids_install_valgrind elif sample == 'ids_multiple_screens': return ids_multiple_screens elif sample == 'ids_indexing': return ids_multiple_screens else: return [] @staticmethod def find_top_topics(topics_for_similar_bugs, recommended_similar_bugs, k=5): result = [] len_similar_bugs = len(recommended_similar_bugs) if len_similar_bugs <= 1: return None topic_count = {} for bug, topics in topics_for_similar_bugs.iteritems(): for topic_id in topics: if topic_id not in topic_count: topic_count[topic_id] = dict(count=0, prob=1) topic_count[topic_id]['count'] += 1 current_prob = list( map(lambda i: i['prob'], list( filter(lambda j: j['id'] == bug, recommended_similar_bugs)) ) ) if current_prob: topic_count[topic_id]['prob'] *= next(iter(current_prob)) for topic, data in topic_count.iteritems(): result.append(dict(topic=topic, count=data['count'], prob=data['prob'])) if result: result = sorted(result, key=lambda k: (k['count'], k['prob']), reverse=True) top = next(iter(result)) if top['count'] == len_similar_bugs: return [top] elif len(result) >= 2: itr = iter(result) first_topic = next(itr) second_topic = next(itr) if first_topic['count'] == 1 and first_topic['count'] == second_topic['count']: return None else: if k > len(result): k = len(result) return result[:k] return None @staticmethod def _parse_tokens(data): if 'tokens' in data: # https://stackoverflow.com/questions/17683062/join-string-before-between-and-after lst = [entry.encode('utf-8') for entry in data['tokens']] return '{}{}'.format(' '.join(lst[:len(lst) - 1]), lst[-1]) return [] @staticmethod def precision_of(bug_id, analysis, analysis_sentences, recommended, bug_semafor_map): if str(bug_id) in bug_semafor_map: semafor_frame = bug_semafor_map[str(bug_id)] current_frames = semafor_frame['frames'] if current_frames: frames = [_frame.parse(frame) for frame in current_frames] bug_ids = [] bugs_with_similar_analyses = {} for recommended_analysis in recommended: bug_ids += recommended_analysis.bug_ids() for id in bug_ids: if id not in bugs_with_similar_analyses: bugs_with_similar_analyses[id] = False for recommended_analysis in recommended: if recommended_analysis.similar_to(analysis_sentences, frames): tp_ids = recommended_analysis.bug_ids() for id in tp_ids: bugs_with_similar_analyses[id] = True tp = len(list(filter(lambda b: b is True, bugs_with_similar_analyses.values()))) total = len(bugs_with_similar_analyses.values()) result = tp / total return result return 0
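
# --- Hedged standalone sketch --------------------------------------------------------------
# The ranking in find_top_topics() above is a simple vote: every recommended bug adds 1
# to each of its topics and multiplies in its similarity probability, and topics are
# sorted by (count, prob). Bug ids reuse entries from the lists at the top of this
# file, but the topic assignments and probabilities are hypothetical.
if __name__ == "__main__":
    topics_for_similar_bugs = {122437: [3], 152671: [3, 7], 158633: [7]}
    recommended_similar_bugs = [{'id': 122437, 'prob': 0.8},
                                {'id': 152671, 'prob': 0.6},
                                {'id': 158633, 'prob': 0.5}]

    topic_count = {}
    for bug, topics in topics_for_similar_bugs.items():
        prob = next(r['prob'] for r in recommended_similar_bugs if r['id'] == bug)
        for topic_id in topics:
            entry = topic_count.setdefault(topic_id, dict(count=0, prob=1.0))
            entry['count'] += 1
            entry['prob'] *= prob

    ranked = sorted(topic_count.items(),
                    key=lambda kv: (kv[1]['count'], kv[1]['prob']), reverse=True)
    print(ranked)  # topic 3 outranks topic 7 via the accumulated probability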
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=superfluous-parens, redefined-outer-name, redefined-outer-name,pointless-string-statement # pylint: disable=consider-using-enumerate,invalid-name """Tuning record and serialization format""" import argparse import base64 import logging import multiprocessing import pickle import json import time import os import itertools from collections import OrderedDict import numpy as np from .. import build, lower from ..target import Target from .. import __version__ from . import task from .task import ConfigEntity, ApplyHistoryBest from .measure import MeasureInput, MeasureResult AUTOTVM_LOG_VERSION = 0.2 _old_version_warning = True logger = logging.getLogger("autotvm") try: # convert unicode to str for python2 _unicode = unicode except NameError: _unicode = () try: _long = long except NameError: _long = int def measure_str_key(inp, include_config=True): """get unique str key for MeasureInput Parameters ---------- inp: autotvm.measure.MeasureInput input for the measure include_config: bool, optional whether includes config in the str key Returns ------- key: str The str representation of key """ config_str = str(inp.config) if include_config else "" return "".join( [str(inp.target), inp.task.name, str(inp.task.args), str(inp.task.kwargs), config_str] ) def encode(inp, result, protocol="json"): """encode (MeasureInput, MeasureResult) pair to a string Parameters ---------- inp: autotvm.measure.MeasureInput result: autotvm.measure.MeasureResult pair of input/result protocol: str log protocol, json or pickle Returns ------- row: str a row in the logger file """ if protocol == "json": json_dict = { "input": (str(inp.target), inp.task.name, inp.task.args, inp.task.kwargs), "config": inp.config.to_json_dict(), "result": ( result.costs if result.error_no == 0 else (1e9,), result.error_no, result.all_cost, result.timestamp, ), "version": AUTOTVM_LOG_VERSION, "tvm_version": __version__, } return json.dumps(json_dict) if protocol == "pickle": row = ( str(inp.target), str( base64.b64encode( pickle.dumps([inp.task.name, inp.task.args, inp.task.kwargs]) ).decode() ), str(base64.b64encode(pickle.dumps(inp.config)).decode()), str(base64.b64encode(pickle.dumps(tuple(result))).decode()), str(AUTOTVM_LOG_VERSION), str(__version__), ) return "\t".join(row) raise RuntimeError("Invalid log protocol: " + protocol) def decode(row, protocol="json"): """Decode encoded record string to python object Parameters ---------- row : str a row in the logger file protocol : str log protocol, json or pickle Returns ------- ret : tuple(autotvm.measure.MeasureInput, autotvm.measure.MeasureResult), or None The tuple of input and result, or None if input uses old version log format. 
""" # pylint: disable=unused-variable global _old_version_warning if protocol == "json": row = json.loads(row) if "v" in row and row["v"] == 0.1: if _old_version_warning: logger.warning("AutoTVM log version 0.1 is no longer supported.") _old_version_warning = False return None tgt, task_name, task_args, task_kwargs = row["input"] tgt = str(tgt) if "-target" in tgt: logger.warning('"-target" is deprecated, use "-mtriple" instead.') tgt = tgt.replace("-target", "-mtriple") tgt = Target(str(tgt)) def clean_json_to_python(x): """1. Convert all list in x to tuple (hashable) 2. Convert unicode to str for python2 """ if isinstance(x, list): return tuple([clean_json_to_python(a) for a in x]) if isinstance(x, _unicode): return str(x) if isinstance(x, (_long, int)): return int(x) return x tsk = task.Task(clean_json_to_python(task_name), clean_json_to_python(task_args)) config = ConfigEntity.from_json_dict(row["config"]) inp = MeasureInput(tgt, tsk, config) result = MeasureResult(*[tuple(x) if isinstance(x, list) else x for x in row["result"]]) config.cost = np.mean(result.costs) return inp, result if protocol == "pickle": items = row.split("\t") if len(items) == 4: if _old_version_warning: logger.warning("AutoTVM log version 0.1 is no longer supported.") _old_version_warning = False return None tgt = Target(items[0]) task_tuple = pickle.loads(base64.b64decode(items[1].encode())) config = pickle.loads(base64.b64decode(items[2].encode())) result = MeasureResult(*pickle.loads(base64.b64decode(items[3].encode()))) config.cost = np.mean(result.costs) tsk = task.Task(task_tuple[0], task_tuple[1]) return MeasureInput(tgt, tsk, config), result raise RuntimeError("Invalid log protocol: " + protocol) def load_from_file(filename): """Generator: load records from file. This is a generator that yields the records. Parameters ---------- filename: str Yields ------ input: autotvm.measure.MeasureInput result: autotvm.measure.MeasureResult """ for row in open(filename): if row and not row.startswith("#"): ret = decode(row) if ret is None: continue yield ret def split_workload(in_file, clean=True): """Split a log file into separate files, each of which contains only a single workload This function can also delete duplicated records in log file Parameters ---------- in_file: str input filename clean: bool whether delete duplicated items """ tic = time.time() lines = list(open(in_file).readlines()) logger.info("start converting...") pool = multiprocessing.Pool() lines = [rec for rec in pool.map(decode, lines) if rec is not None] logger.info("map done %.2f", time.time() - tic) wkl_dict = OrderedDict() for inp, res in lines: wkl = measure_str_key(inp, False) if wkl not in wkl_dict: wkl_dict[wkl] = [] wkl_dict[wkl].append([inp, res]) if clean: for i, (k, v) in enumerate(wkl_dict.items()): # clean duplicated items added = set() cleaned = [] for inp, res in v: str_key = measure_str_key(inp) if str_key in added: continue added.add(str_key) cleaned.append([inp, res]) # write to file logger.info("Key: %s\tValid: %d\tDup: %d\t", k, len(cleaned), len(v) - len(cleaned)) with open(args.i + ".%03d.wkl" % i, "w") as fout: for inp, res in cleaned: fout.write(encode(inp, res) + "\n") else: for i, (k, v) in enumerate(wkl_dict.items()): logger.info("Key: %s\tNum: %d", k, len(v)) with open(args.i + ".%03d.wkl" % i, "w") as fout: for inp, res in v: fout.write(encode(inp, res) + "\n") def pick_best(in_file, out_file): """ Pick best entries from a file and store it to another file. This distill the useful log entries from a large log file. 
If out_file already exists, the best entries from both in_file and out_file will be saved. Parameters ---------- in_file: str The filename of input out_file: str or file The filename of output """ context = load_from_file(in_file) if os.path.isfile(out_file): out_context = load_from_file(out_file) context = itertools.chain(context, out_context) context, context_clone = itertools.tee(context) best_context = ApplyHistoryBest(context) best_set = set() for v in best_context.best_by_model.values(): best_set.add(measure_str_key(v[0])) for v in best_context.best_by_targetkey.values(): best_set.add(measure_str_key(v[0])) logger.info("Extract %d best records from the %s", len(best_set), in_file) fout = open(out_file, "w") if isinstance(out_file, str) else out_file for inp, res in context_clone: if measure_str_key(inp) in best_set: fout.write(encode(inp, res) + "\n") best_set.remove(measure_str_key(inp)) """ Usage: This record executable module has three modes. * Print log file in readable format e.g. python -m tvm.autotvm.record --mode read --i collect_conv.log --begin 0 --end 5 --ir --code * Extract history best from a large log file e.g. python -m tvm.autotvm.record --mode pick --i collect.log * Split a log file into separate files, each of which contains only a single wkl e.g. python -m tvm.autotvm.record --mode split --i collect.log """ if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--mode", choices=["read", "pick", "split"], default="read") parser.add_argument("--i", type=str, help="input file") parser.add_argument("--o", type=str, default=None, help="output file") parser.add_argument("--begin", type=int, default=0) parser.add_argument("--end", type=int, default=5) parser.add_argument("--ir", action="store_true") parser.add_argument("--code", action="store_true") args = parser.parse_args() logging.basicConfig(level=logging.INFO) if args.mode == "pick": args.o = args.o or args.i + ".best.log" pick_best(args.i, args.o) elif args.mode == "read": for i, (inp, result) in enumerate(load_from_file(args.i)): if args.begin <= i < args.end: with inp.target: s, arg_bufs = inp.task.instantiate(inp.config) print("") print(inp.target, inp.task, inp.config) print(result) if args.ir: with inp.target: print(lower(s, arg_bufs, simple_mode=True)) if args.code: with inp.target: func = build(s, arg_bufs) print(func.imported_modules[0].get_source()) elif args.mode == "split": split_workload(args.i)
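# ---------------------------------------------------------------------------
# Hedged usage sketch: the helper below is not part of the original module and
# only combines functions defined above.  It assumes TVM is installed and that
# the log file passed in exists; the name "collect.log" in the comment is made
# up.  load_from_file() yields decoded (MeasureInput, MeasureResult) pairs,
# and error_no / costs are the same fields that encode() writes out.
# ---------------------------------------------------------------------------
def _summarize_log_sketch(filename):
    """Print task name and best measured cost of every successful record."""
    for inp, res in load_from_file(filename):
        if res.error_no == 0:  # a non-zero error_no marks a failed measurement
            print("%s %g" % (inp.task.name, min(res.costs)))

# Typical use, equivalent in spirit to the --mode read / --mode pick paths of
# the __main__ block above:
#   _summarize_log_sketch("collect.log")
#   pick_best("collect.log", "collect.log.best.log")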
#!/usr/bin/env python # # T. Carman Spring/Summer 2016 # import os # general path manipulations import shutil # cleaning up files import glob # listing/finding data files import json # reading data files import signal # for exiting gracefully import itertools import tarfile # opening archived data import argparse # command line interface import textwrap # help formatting import numpy as np # general maths import pandas as pd # for timeseries plots import matplotlib.pyplot as plt # general plotting import sys if sys.version_info[0] < 3: from StringIO import StringIO else: from io import StringIO def exit_gracefully(signum, frame): '''A function for quitting w/o leaving a stacktrace on the users console.''' print "Caught signal='%s', frame='%s'. Quitting - gracefully." % (signum, frame) sys.exit(1) # Generator function for extracting specific files from a tar archive def monthly_files(tarfileobj): '''Get the */monthly/*.json files...''' for tarinfo in tarfileobj: if 'monthly' in tarinfo.name: yield tarinfo def analyze(cjd, pjd): '''Extract every ounce of knowledge from a pair of json data objects. Returns a dict with all the data. ''' vasc = [0,1,2,3,4] nonvasc = [5,6,7] results = {} #results['C veg'] = bal_C_veg(cjd, pjd, xsec='all') results['C veg'] = bal_C_veg(cjd, pjd, xsec='all') #results['C veg del'] = bal_C_veg(cjd, pjd, xsec='all').delta results['C veg vasc'] = bal_C_veg(cjd, pjd, xsec='vasc') #results['C veg vasc del'] = bal_C_veg(cjd, pjd, xsec='vasc').delta results['C veg nonvasc'] = bal_C_veg(cjd, pjd, xsec='nonvasc') #results['C veg nonvasc del'] = bal_C_veg(cjd, pjd, xsec='nonvasc').delta results['C soil'] = bal_C_soil(cjd, pjd) #results['C soil del'] = bal_C_soil(cjd, pjd).delta results['N veg tot'] = bal_N_veg_tot(cjd, pjd, xsec='all') results['N veg str'] = bal_N_veg_str(cjd, pjd, xsec='all') results['N veg lab'] = bal_N_veg_lab(cjd, pjd, xsec='all') results['N soil org'] = bal_N_soil_org(cjd, pjd) results['N soil avl'] = bal_N_soil_avl(cjd, pjd) results['N veg vasc tot'] = bal_N_veg_tot(cjd, pjd, xsec='vasc') results['N veg vasc str'] = bal_N_veg_str(cjd, pjd, xsec='vasc') results['N veg vasc lab'] = bal_N_veg_lab(cjd, pjd, xsec='vasc') results['N veg nonvasc tot'] = bal_N_veg_tot(cjd, pjd, xsec='nonvasc') results['N veg nonvasc str'] = bal_N_veg_str(cjd, pjd, xsec='nonvasc') results['N veg nonvasc lab'] = bal_N_veg_lab(cjd, pjd, xsec='nonvasc') return results def file_loader(**kwargs): '''Returns a list of files to open''' if 'fileslice' in kwargs: slice_string = kwargs['fileslice'] # parse string into slice object # https://stackoverflow.com/questions/680826/python-create-slice-object-from-string/681949#681949 custom_slice = slice(*map(lambda x: int(x.strip()) if x.strip() else None, slice_string.split(':'))) else: custom_slice = slice(None,None,None) if "fromarchive" in kwargs: tf = tarfile.open(kwargs['fromarchive']) # might be able to use tar checksum (crc) to implement some kind of caching... TMP_EXTRACT_LOCATION = '/tmp/com.iab.dvmdostem.diagnostics.23f23f2' # <-- could be a checksum of the tar? 
if ( os.path.isdir(TMP_EXTRACT_LOCATION) or os.path.isfile(TMP_EXTRACT_LOCATION) ): print "Cleaning up the temporary location: ", TMP_EXTRACT_LOCATION shutil.rmtree(TMP_EXTRACT_LOCATION) tf.extractall(TMP_EXTRACT_LOCATION, members=monthly_files(tf)) full_glob = os.path.join(TMP_EXTRACT_LOCATION, "/tmp/dvmdostem/calibration/monthly/*.json") print "Matching this pattern: ", full_glob jfiles = glob.glob(full_glob) else: pattern_string = "/tmp/dvmdostem/calibration/monthly/*.json" print "Looking for json files matching pattern:", pattern_string jfiles = glob.glob(pattern_string) print "Custom file slice:", custom_slice jfiles = jfiles[custom_slice] return jfiles def onclick(event): if event.xdata != None and event.ydata != None: i_edy = np.rint(event.ydata) # rint - convert to integer with rounding i_edx = np.rint(event.xdata) cax = event.inaxes caximg = cax.images[0] print "Axes: %s" % (cax.get_title().replace("\n", " ")) print "Data coordinates (y, x): ", "(%s,%s)"%(event.ydata, event.xdata) print "Data coords as int: ", "(%s,%s)"%(i_edy, i_edx) print "Data at[%s, %s]: %s" % (i_edy, i_edx, caximg.get_array()[i_edy, i_edx]) print if event.key == 'ctrl+c': print "Captured Ctrl-C. Quit nicely." exit_gracefully(event.key, None) # <-- need to pass something for frame ?? def error_image(**kwargs): '''Returns an array with dimensions (yrs,months) for the error variable.''' if "plotlist" not in kwargs: plotlist = ['C veg', 'C soil', 'N veg tot', 'N veg str', 'N veg lab', 'N soil org', 'N soil avl'] else: plotlist = kwargs["plotlist"] jfiles = sorted(file_loader(**kwargs)) # Figure out the month and year for the first and last # data files. Assumes that the datafiles are contiguous. with open(jfiles[0], 'r') as f: jdata = json.load(f) m1 = int(jdata["Month"]) y1 = (jdata["Year"]) with open(jfiles[-1], 'r') as f: jdata = json.load(f) mlast = (jdata["Month"]) yrlast = (jdata["Year"]) # Pad out the array so it fills an even # number of months/years. So if the slice # lands on month 3 and end on month 10, # add 3 empty months at the beginning # and 1 at the end. Helps for displaying as an image... empty = np.empty(len(jfiles) + m1 + (11-mlast)) * np.nan # Make room for a lot of data imgarrays_err = np.array([np.copy(empty) for i in plotlist]) imgarrays_delta = np.array([np.copy(empty) for i in plotlist]) # Run over all the files, calculating all the derived # diagnostics. pjd = None for idx, jfile in enumerate(jfiles): with open(jfile, 'r') as f: jdata = json.load(f) diagnostics = analyze(jdata, pjd) for pltnum, key in enumerate(plotlist): imgarrays_err[pltnum][idx+m1] = diagnostics[key].err imgarrays_delta[pltnum][idx+m1] = diagnostics[key].delta pjd = jdata image_plot(imgarrays_err, plotlist) image_plot(imgarrays_delta, plotlist) image_plot(imgarrays_err/imgarrays_delta, plotlist) def image_plot(imgarrays, plotlist): from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.colors import LogNorm from matplotlib.ticker import MultipleLocator from matplotlib.ticker import MaxNLocator import matplotlib.ticker as mtkr # undertake the plotting of the now full arrays.. 
fig, axar = plt.subplots(1, len(imgarrays), sharex=True, sharey=True) fig.set_tight_layout(True) cid = fig.canvas.mpl_connect('button_press_event', onclick) #mpldatacursor.datacursor(display='single') for axidx, data in enumerate(imgarrays): print "Plotting for axes %s" % axidx print "-------------------------------------------" # We are going to use a divergent color scheme centered around zero, # so we need to find largest absolute value of the data and use that # as the endpoints for the color-scaling. xval = np.nanmax(np.abs(data)) print "Color map range for ax[%s]: %s" % (axidx, xval) # It is also handy to mask out the values that are zero (no error) # or riduculously close to zero (effectively zero) print "Valid values before masking values close to zero: ", np.count_nonzero(~np.isnan(data)) data = np.ma.masked_equal(data, 0) maskclose = np.isclose(data, np.zeros(data.shape)) data = np.ma.masked_array(data, mask=maskclose) data = np.ma.masked_invalid(data) print "Remaining data after masking near-zero data: ", data.count() # Transform data to 2D shape for showing as an image data = data.reshape(len(data)/12, 12) divider = make_axes_locatable(axar[axidx]) cwm = plt.cm.coolwarm cwm.set_bad('white',1.0) cwm.set_over('yellow',1.0) # <- nothing should be ouside the colormap range... cwm.set_under('orange',1.0) colax = divider.append_axes("bottom", size="3%", pad="10%") # Display the data as an image im = axar[axidx].imshow( data, interpolation="nearest", cmap="coolwarm", vmin=-xval, vmax=xval, aspect='auto' # helps with non-square images... ) loc = MultipleLocator(base=1.0) # this locator puts ticks at regular intervals axar[axidx].xaxis.set_major_locator(loc) axar[axidx].grid(True, axis='both') cbar = plt.colorbar(im, cax=colax, orientation='horizontal', format="%0.8f", ticks=mtkr.MaxNLocator(6, prune=None)) plt.setp(colax.xaxis.get_majorticklabels(), rotation=90) #axar[axidx].yaxis.set_visible(False) #axar[axidx].yaxis.set_major_locator(mtkr.MultipleLocator(5)) #axar[axidx].tick_params(axis='y', direction='in', length=3, width=.5, colors='k', labelleft='off', labelright='off') axar[axidx].set_xlabel("Month") #axar[axidx].xaxis.set_major_locator(mtkr.MaxNLocator(5, integer=True)) # 5 seems to be magic number; works with zooming. axar[axidx].tick_params(axis='x', direction='in', length=3, width=.5, colors='k') # Turn the y axis on for the leftmost plot axar[0].yaxis.set_visible(True) axar[0].set_ylabel("Year") #axar[0].tick_params(axis='y', direction='out', length=4, width=1, colors='k', labelleft='on', labelright='off') # set the titles for the subplots - rudimentady line-wrapping for long titles assert len(axar) == len(plotlist) # zip silently trucates longer list for x in zip(axar, plotlist): if len(x[1].split(' ')) > 3: print x[1].split(' ') l1 = ' '.join(x[1].split(' ')[0:3]) l2 = ' '.join(x[1].split(' ')[3:]) newX1 = "\n".join([l1, l2]) x[0].set_title(newX1) else: x[0].set_title(x[1]) plt.show(block=True) def plot_tests(test_list, **kwargs): #title = "------ %s ------" % t for t in test_list: data = compile_table_by_year(t, **kwargs) np.loadtxt(StringIO(data), skiprows=1) filter(None,data.split("\n")[0].split(" ")) df = pd.DataFrame( np.loadtxt(StringIO(data), skiprows=1), columns=filter(None,data.split("\n")[0].split(" ")) ) # fails to read some columns with many zeros - whole # column ends up NaN. May need to update pandas version #df = pd.read_csv(StringIO(data), header=0, delim_whitespace=True, na_values='NULL') print "plotting dataframe..." 
dfp = df.plot(subplots=True)#, grid=False) print "using matplotlib show..." plt.show(block=True) def run_tests(test_list, **kwargs): # write to file (w2f) if 'w2f' in kwargs: outfile = kwargs['w2f'] else: outfile = None if outfile: folder, fname = os.path.split(outfile) if folder != '': try: os.makedirs(folder) # keyword argument 'exists_ok=True' is for python 3.4+ except OSError: if not os.path.isdir(folder): raise print "clearing output file: ", outfile with open(outfile, 'w') as f: f.write("") for t in test_list: title = "------ %s ------" % t data = compile_table_by_year(t, **kwargs) # print to console (p2c) if 'p2c' in kwargs and kwargs['p2c'] == True: print title print data if outfile != None: with open(outfile, 'a') as f: print "appending to file: ", outfile f.write(title); f.write("\n") f.write(data) def compile_table_by_year(test_case, **kwargs): jfiles = file_loader(**kwargs) # map 'test case' strings to various test and # reporting functions we have written in the module. function_dict = { 'N_soil_balance': Check_N_cycle_soil_balance, 'N_veg_balance': Check_N_cycle_veg_balance, 'C_soil_balance': Check_C_cycle_soil_balance, 'C_veg_balance': Check_C_cycle_veg_balance, 'C_veg_vascular_balance': Check_C_cycle_veg_vascular_balance, 'C_veg_nonvascular_balance': Check_C_cycle_veg_nonvascular_balance, 'report_soil_C': Report_Soil_C } check_func = function_dict[test_case] header = check_func(0, header=True) table_data = "" for idx, jfile in enumerate(jfiles): with open(jfile, 'r') as f: jdata = json.load(f) prev_jdata = None if idx > 0: with open(jfiles[idx-1], 'r') as prev_jf: prev_jdata = json.load(prev_jf) row = check_func(idx, jd=jdata, pjd=prev_jdata, header=False) table_data = table_data + row full_report = header + table_data return full_report def sum_across(key, jdata, xsec): # Setup a dict for mapping community type numbers # to differnet combos of PFTs for vascular/non-vascular CMTLU = { 5: { 'all' : [0,1,2,3,4,5,6,7,8,9], 'vasc' : [0,1,2,3,4], 'nonvasc' : [5,6,7] } } pfts = CMTLU[5][xsec] total = np.nan if jdata != None: total = 0 for pft in ['PFT%i'%i for i in pfts]: if ( type(jdata[pft][key]) == dict ): # sniff out compartment variables if len(jdata[pft][key]) == 3: total += jdata[pft][key]["Leaf"] + jdata[pft][key]["Stem"] + jdata[pft][key]["Root"] else: print "Error?: incorrect number of compartments..." else: total += jdata[pft][key] return total def ecosystem_sum_soilC(jdata): total = np.nan if jdata != None: total = 0 total += jdata["RawCSum"] total += jdata["SomaSum"] total += jdata["SomcrSum"] total += jdata["SomprSum"] # total += jdata["CarbonMineralSum"] # total += jdata["CarbonDeep"] # total += jdata["CarbonShallow"] total += jdata["WoodyDebrisC"] return total class DeltaError(object): '''Simply used to allow convenient access to data via . 
operator.''' def __init__(self, _d, _e): self.delta = _d self.err = _e def bal_C_soil(curr_jd, prev_jd): delta = np.nan if prev_jd != None: delta = ecosystem_sum_soilC(curr_jd) - ecosystem_sum_soilC(prev_jd) sum_of_fluxes = sum_across("LitterfallCarbonAll", curr_jd, 'all') \ + sum_across("MossDeathC", curr_jd, 'all') \ + curr_jd["BurnVeg2SoiAbvVegC"] \ + curr_jd["BurnVeg2SoiBlwVegC"] \ + curr_jd["D2WoodyDebrisC"] \ - curr_jd["RH"] \ - curr_jd["BurnSoi2AirC"] err = delta - sum_of_fluxes return DeltaError(delta, err) def bal_C_veg(curr_jd, pjd, xsec='all'): delta = np.nan if pjd != None: delta = sum_across("VegCarbon", curr_jd, xsec) - sum_across("VegCarbon", pjd, xsec) if xsec == 'all': burn_flux = curr_jd["BurnVeg2AirC"] \ + curr_jd["BurnVeg2SoiAbvVegC"] \ + curr_jd["BurnVeg2SoiBlwVegC"] \ + curr_jd["BurnAbvVeg2DeadC"] sum_of_fluxes = sum_across("NPPAll", curr_jd, xsec) \ - sum_across("LitterfallCarbonAll", curr_jd, xsec) \ - sum_across("MossDeathC", curr_jd, xsec) \ - burn_flux if xsec == 'vasc': sum_of_fluxes = sum_across("NPPAll", curr_jd, xsec) \ - sum_across("LitterfallCarbonAll", curr_jd, xsec) if xsec == 'nonvasc': sum_of_fluxes = sum_across("NPPAll", curr_jd, xsec) \ - sum_across("LitterfallCarbonAll", curr_jd, xsec) \ - sum_across("MossDeathC", curr_jd, xsec) err = delta - sum_of_fluxes return DeltaError(delta, err) def bal_N_soil_org(jd, pjd): delta = np.nan if pjd != None: delta = jd["OrganicNitrogenSum"] - pjd["OrganicNitrogenSum"] sum_of_fluxes = sum_across("LitterfallNitrogenPFT", jd, 'all') \ + jd["MossdeathNitrogen"] \ + jd["BurnVeg2SoiAbvVegN"] \ + jd["BurnVeg2SoiBlwVegN"] \ - jd["NetNMin"] \ - jd["BurnSoi2AirN"] \ err = delta - sum_of_fluxes return DeltaError(delta, err) def bal_N_soil_avl(jd, pjd): delta = np.nan if pjd != None: delta = jd["AvailableNitrogenSum"] - pjd["AvailableNitrogenSum"] sum_of_fluxes = (jd["NetNMin"] + jd["AvlNInput"]) - (sum_across("TotNitrogenUptake", jd, 'all') + jd["AvlNLost"]) err = delta - sum_of_fluxes return DeltaError(delta, err) def bal_N_veg_tot(jd, pjd, xsec='all'): delta = np.nan if pjd != None: delta = sum_across("NAll", jd, xsec) - sum_across("NAll", pjd, xsec) if xsec == 'all': burn_flux = jd["BurnVeg2AirN"] \ + jd["BurnVeg2SoiAbvVegN"] \ + jd["BurnVeg2SoiBlwVegN"] \ + jd["BurnAbvVeg2DeadN"] sum_of_fluxes = sum_across("TotNitrogenUptake", jd, xsec) \ - sum_across("LitterfallNitrogenPFT", jd, xsec) \ - jd["MossdeathNitrogen"] \ - burn_flux if xsec == 'vasc': sum_of_fluxes = sum_across("TotNitrogenUptake", jd, xsec) \ - sum_across("LitterfallNitrogenPFT", jd, xsec) if xsec == 'nonvasc': sum_of_fluxes = sum_across("TotNitrogenUptake", jd, xsec) \ - sum_across("LitterfallNitrogenPFT", jd, xsec) \ - jd["MossdeathNitrogen"] err = delta - sum_of_fluxes return DeltaError(delta, err) def bal_N_veg_str(jd, pjd, xsec='all'): delta = np.nan if pjd != None: delta = sum_across("VegStructuralNitrogen", jd, xsec) - sum_across("VegStructuralNitrogen", pjd, xsec) # <-- will sum compartments if xsec == 'all': burn_flux = jd["BurnVeg2AirN"] \ + jd["BurnVeg2SoiAbvVegN"] \ + jd["BurnVeg2SoiBlwVegN"] \ + jd["BurnAbvVeg2DeadN"] sum_of_fluxes = sum_across("StNitrogenUptake", jd, xsec) \ + sum_across("NMobil", jd, xsec) \ - sum_across("LitterfallNitrogenPFT", jd, xsec) \ - jd["MossdeathNitrogen"] \ - sum_across("NResorb", jd, xsec) \ - burn_flux if xsec == 'vasc': sum_of_fluxes = sum_across("StNitrogenUptake", jd, xsec) \ + sum_across("NMobil", jd, xsec) \ - sum_across("LitterfallNitrogenPFT", jd, xsec) \ - sum_across("NResorb", jd, xsec) if xsec == 
'nonvasc': sum_of_fluxes = sum_across("StNitrogenUptake", jd, xsec) + \ sum_across("NMobil", jd, xsec) - \ jd["MossdeathNitrogen"] - \ sum_across("NResorb", jd, xsec) err = delta - sum_of_fluxes return DeltaError(delta, err) def bal_N_veg_lab(jd, pjd, xsec='all'): delta = np.nan if pjd != None: delta = sum_across("VegLabileNitrogen", jd, xsec) - sum_across("VegLabileNitrogen", pjd, xsec) if xsec=='all' or xsec == 'vasc' or xsec == 'nonvasc': sum_of_fluxes = sum_across("LabNitrogenUptake", jd, xsec) \ + sum_across("NResorb", jd, xsec) \ - sum_across("NMobil", jd, xsec) err = delta - sum_of_fluxes return DeltaError(delta, err) def Check_N_cycle_veg_balance(idx, header=False, jd=None, pjd=None): '''Checking....?''' if header: return "{:<4} {:>6} {:>2} {:>10} {:>10} {:>10} {:>10} {:>10} {:>10} {:>10} {:>10} {:>10}\n".format( "idx", "yr", "m", "errT", "errS", "errL", "deltaN", "delNStr", "delNLab", "sumFlxT","sumFlxS", "sumFlxL" ) else: sum_str_N_flux = jd["StNitrogenUptakeAll"] - (sum_across("LitterfallNitrogenPFT", jd, 'all') + jd["MossdeathNitrogen"]) + sum_across("NMobil", jd, 'all') - sum_across("NResorb", jd, 'all') sum_lab_N_flux = sum_across("LabNitrogenUptake", jd, 'all') + sum_across("NResorb", jd, 'all') - sum_across("NMobil", jd, 'all') return "{:<4} {:>6} {:>2} {:>10.4f} {:>10.4f} {:>10.3f} {:>10.4f} {:>10.4f} {:>10.3f} {:>10.4f} {:>10.4f} {:>10.3f}\n".format( idx, jd["Year"], jd["Month"], bal_N_veg_tot(jd, pjd, xsec='all').err, bal_N_veg_str(jd, pjd, xsec='all').err, bal_N_veg_lab(jd, pjd, xsec='all').err, bal_N_veg_tot(jd, pjd, xsec='all').delta, bal_N_veg_str(jd, pjd, xsec='all').delta, bal_N_veg_lab(jd, pjd, xsec='all').delta, sum_str_N_flux + sum_lab_N_flux, sum_str_N_flux, sum_lab_N_flux, ) def Check_N_cycle_soil_balance(idx, header=False, jd=None, pjd=None): if header: return "{:<6} {:<6} {:<2} {:>10} {:>10} {:>10} {:>10}\n".format("idx","yr","m","errORGN","delORGN","errAVL","delAVL" ) return "{:<6} {:<6} {:<2} {:>10.4f} {:>10.4f} {:>10.4f} {:>10.4f}\n".format( idx, jd["Year"], jd["Month"], bal_N_soil_org(jd, pjd).err, bal_N_soil_org(jd, pjd).delta, bal_N_soil_avl(jd, pjd).err, bal_N_soil_avl(jd, pjd).delta ) def Check_C_cycle_soil_balance(idx, header=False, jd=None, pjd=None): if header: return '{:<4} {:>2} {:>4} {:>8} {:>10} {:>10} {:>10} {:>10} {:>10} {:>10} {:>10}\n'.format( 'idx', 'm', 'yr', 'err', 'deltaC', 'lfmdcrh', 'sumsoilC', 'ltrfal', 'mossdeathc', 'RH', 'checksum' ) else: return "{:<4} {:>2} {:>4} {:>8.2f} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}\n".format( idx, jd["Month"], jd["Year"], bal_C_soil(jd, pjd).err, bal_C_soil(jd, pjd).delta, sum_across("LitterfallCarbonAll", jd, 'all') + sum_across("MossDeathC", jd, 'all') - jd["RH"], ecosystem_sum_soilC(jd), sum_across("LitterfallCarbonAll", jd, 'all') , sum_across("MossDeathC", jd, 'all'), jd['RH'], (jd['RHsomcr']+jd['RHsompr']+jd['RHsoma']+jd['RHraw']+jd['RHmossc']+jd['RHwdeb']), ) def Report_Soil_C(idx, header=False, jd=None, pjd=None): '''Create a table/report for Soil Carbon''' if header: return '{:<4} {:>4} {:>9} {:>9} {:>9} {:>9} {:>9} {:>9} {:>9} {:>9} {:>9} {:>9} {:>9} {:>9} {:>9}\n'.format( 'idx', 'yr', 'RHtot', 'RHrawc', 'RHsomac', 'RHsomprc','RHsomcrc','RHmossc','RHwdeb','Lfc+dmsc','rawc','soma','sompr','somcr','dmossc' ) else: # FIll in the table with data... 
return "{:<4} {:>4} {:>9.2f} {:>9.2f} {:>9.2f} {:>9.2f} {:>9.2f} {:>9.2f} {:>9.2f} {:>9.2f} {:>9.2f} {:>9.2f} {:>9.2f} {:>9.2f} {:>9.2f}\n".format( idx, jd['Year'], jd['RH'], jd['RHraw'], jd['RHsoma'], jd['RHsompr'], jd['RHsomcr'], jd['RHmossc'], jd['RHwdeb'], sum_across("LitterfallCarbonAll", jd, 'all') + jd['MossDeathC'], jd['RawCSum'], jd['SomaSum'], jd['SomprSum'], jd['SomcrSum'], ) def Check_C_cycle_veg_balance(idx, header=False, jd=None, pjd=None): '''Should duplicate Vegetation_Bgc::deltastate()''' if header: return '{:<4} {:>2} {:>4} {:>10} {:>10} {:>15} {:>10} {:>15} {:>15} {:>15}\n'.format( 'idx', 'm', 'yr', 'err', 'deltaC', 'NPP-LFallC-mdc', 'mdc', 'VegC', 'NPP', 'LFallC' ) else: # FIll in the table with data... return '{:<4d} {:>2} {:>4} {:>10.3f} {:>10.3f} {:>15.3f} {:>10.3f} {:>15.3f} {:>15.3f} {:>15.3f}\n'.format( idx, jd['Month'], jd['Year'], bal_C_veg(jd, pjd, xsec='all').err, bal_C_veg(jd, pjd, xsec='all').delta, sum_across("NPPAll", jd, 'all') - sum_across("LitterfallCarbonAll", jd, 'all') - sum_across("MossDeathC", jd, 'all'), sum_across("MossDeathC", jd, 'all'), sum_across("VegCarbon", jd, 'all'), sum_across("NPPAll", jd, 'all') , sum_across("LitterfallCarbonAll", jd, 'all') , ) def Check_C_cycle_veg_vascular_balance(idx, header=False, jd=None, pjd=None): '''Should duplicate Vegetation_Bgc::deltastate()''' # vascular PFT list (CMT05) vascular = [0,1,2,3,4] if header: return '{:<4} {:>2} {:>4} {:>10} {:>10} {:>15} {:>10} {:>15} {:>15} {:>15}\n'.format( 'idx', 'm', 'yr', 'err', 'deltaC', 'NPP-LFallC-mdc', 'mdc', 'VegC', 'NPP', 'LFallC' ) else: return '{:<4d} {:>2} {:>4} {:>10.3f} {:>10.3f} {:>15.3f} {:>10.3f} {:>15.3f} {:>15.3f} {:>15.3f}\n'.format( idx, jd['Month'], jd['Year'], bal_C_veg(jd, pjd, xsec='vasc').err, bal_C_veg(jd, pjd, xsec='vasc').delta, sum_across("NPPAll", jd, 'vasc') - sum_across("LitterfallCarbonAll", jd, 'vasc') - sum_across("MossDeathC",jd,'vasc'), sum_across("MossDeathC",jd, 'vasc'), sum_across("VegCarbon", jd, 'vasc'), sum_across("NPPAll", jd, 'vasc'), sum_across("LitterfallCarbonAll", jd, 'vasc') ) def Check_C_cycle_veg_nonvascular_balance(idx, header=False, jd=None, pjd=None): '''Should duplicate Vegetation_Bgc::deltastate()''' # non-vascular PFT list (CMT05) non_vasc = [5,6,7] if header: return '{:<4} {:>2} {:>4} {:>10} {:>10} {:>15} {:>10} {:>15} {:>15} {:>15}\n'.format( 'idx', 'm', 'yr', 'err', 'deltaC', 'NPP-LFallC-mdc', 'mdc', 'VegC', 'NPP', 'LFallC' ) else: # FIll in the table with data... return '{:<4d} {:>2} {:>4} {:>10.3f} {:>10.3f} {:>15.3f} {:>10.3f} {:>15.3f} {:>15.3f} {:>15.3f}\n'.format( idx, jd['Month'], jd['Year'], bal_C_veg(jd, pjd, xsec='nonvasc').err, bal_C_veg(jd, pjd, xsec='nonvasc').delta, sum_across("NPPAll", jd, 'nonvasc') - sum_across("LitterfallCarbonAll", jd, 'nonvasc') - sum_across("MossDeathC", jd, 'nonvasc'), sum_across("MossDeathC", jd, 'nonvasc'), sum_across("VegCarbon", jd, 'nonvasc'), sum_across("NPPAll", jd, 'nonvasc') , sum_across("LitterfallCarbonAll", jd, 'nonvasc') , ) if __name__ == '__main__': # Callback for SIGINT. 
Allows exit w/o printing stacktrace to users screen original_sigint = signal.getsignal(signal.SIGINT) signal.signal(signal.SIGINT, exit_gracefully) error_image_choices = [ 'C soil', 'N soil org', 'N soil avl', 'C veg', 'C veg vasc', 'C veg nonvasc', 'N veg tot', 'N veg str', 'N veg lab', 'N veg vasc tot', 'N veg vasc str', 'N veg vasc lab', 'N veg nonvasc tot', 'N veg nonvasc str', 'N veg nonvasc lab', ] tab_reports_and_timeseries_choices = [ 'N_soil_balance', 'N_veg_balance', 'C_soil_balance', 'C_veg_balance', 'C_veg_vascular_balance', 'C_veg_nonvascular_balance', 'report_soil_C' ] # Make a table listing options for the help text t = itertools.izip_longest(error_image_choices, tab_reports_and_timeseries_choices) option_table = "\n".join(["{:>30} {:>30}".format(r[0], r[1]) for r in t]) option_table = "\n" + option_table # # Setup the command line interface... # parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''\ Error image and tabular report options %s ''' % (option_table)), epilog=textwrap.dedent('''\ epilog text...''') ) parser.add_argument('-s', '--slice', default='', type=str, help="A custom file slice string") parser.add_argument('-a', '--from-archive', default=False, help=textwrap.dedent('''Generate plots from an archive of json files, instead of the normal /tmp directory.''')) parser.add_argument('-i', '--error-image', default=False, nargs='+', choices=error_image_choices, metavar="P", help=textwrap.dedent('''Generate at 2D image plot of the error''') ) parser.add_argument('-p', '--plot-timeseries', default=False, nargs='+', choices=tab_reports_and_timeseries_choices, metavar="P", help=textwrap.dedent('''Generate timeseries''') ) parser.add_argument('-c', '--tab-reports', default=False, nargs='+', choices=tab_reports_and_timeseries_choices, metavar="P", help=textwrap.dedent('''Generate tabular reports''') ) # parser.add_argument('--save-name', default="", # help="A file name prefix to use for saving plots.") # parser.add_argument('--save-format', default="pdf", # help="Choose a file format to use for saving plots.") print "Parsing command line arguments..." args = parser.parse_args() print args slstr = args.slice fromarchive = args.from_archive if args.error_image: print "Creating error image plots..." error_image(plotlist=args.error_image, fileslice=slstr) if args.plot_timeseries: print "Creating timeseries plots..." plot_tests(args.plot_timeseries, fileslice=slstr) if args.tab_reports: print "Creating tabular reports..." run_tests(args.tab_reports, fileslice=slstr, p2c=True)
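# ---------------------------------------------------------------------------
# Hedged sketch (separate from the script above): file_loader() converts the
# --slice option into a slice object with the slice(*map(...)) trick from the
# Stack Overflow answer it links.  The standalone helper below shows the same
# conversion applied to a plain list; the sample file names are made up.
# ---------------------------------------------------------------------------
def parse_slice_string(slice_string):
    '''Convert a string like "2:10:3" or ":5" into a slice object.'''
    parts = [int(p) if p.strip() else None for p in slice_string.split(':')]
    return slice(*parts)

# Example with hypothetical file names:
#   files = ['%04d.json' % i for i in range(12)]
#   files[parse_slice_string('2:10:3')]  ->  ['0002.json', '0005.json', '0008.json']
#   files[parse_slice_string(':5')]      ->  the first five files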
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for origin_info module.""" import inspect import sys import textwrap from tensorflow.python.autograph.pyct import anno from tensorflow.python.autograph.pyct import inspect_utils from tensorflow.python.autograph.pyct import origin_info from tensorflow.python.autograph.pyct import parser from tensorflow.python.autograph.pyct.testing import basic_definitions from tensorflow.python.platform import test from tensorflow.python.util import tf_inspect class OriginInfoTest(test.TestCase): def test_create_source_map(self): source = """ def test_fn(x): return x + 1 """ source = textwrap.dedent(source) node = parser.parse(source) fake_origin = origin_info.OriginInfo( loc=origin_info.Location('fake_filename', 3, 7), function_name='fake_function_name', source_code_line='fake source line', comment=None) anno.setanno(node, anno.Basic.ORIGIN, fake_origin) source_map = origin_info.create_source_map(node, source, 'test_filename') loc = origin_info.LineLocation('test_filename', 2) self.assertIn(loc, source_map) self.assertIs(source_map[loc], fake_origin) def _create_source_map(self, test_fn): node, source = parser.parse_entity(test_fn, ()) origin_info.resolve_entity(node, source, test_fn) # Creating a source map with the source code as output will create # an identity map. 
return origin_info.create_source_map(node, source, 'test_filename') def test_create_source_map_identity(self): test_fn = basic_definitions.simple_function source_map = self._create_source_map(test_fn) module_path = tf_inspect.getsourcefile(test_fn) # Origin line numbers below should match those in basic_definitions.py fn_start = inspect.getsourcelines(test_fn)[1] definition_loc = origin_info.LineLocation('test_filename', 1) self.assertIn(definition_loc, source_map) self.assertEqual(source_map[definition_loc].loc.lineno, fn_start) self.assertEqual(source_map[definition_loc].loc.filename, module_path) self.assertEqual(source_map[definition_loc].function_name, 'simple_function') def test_create_source_map_multiline_call(self): test_fn = basic_definitions.function_with_multiline_call source_map = self._create_source_map(test_fn) module_path = tf_inspect.getsourcefile(test_fn) # Origin line numbers below should match those in basic_definitions.py fn_start = inspect.getsourcelines(test_fn)[1] call_loc = origin_info.LineLocation('test_filename', 3) self.assertIn(call_loc, source_map) self.assertEqual(source_map[call_loc].loc.lineno, fn_start + 2) self.assertEqual(source_map[call_loc].loc.filename, module_path) self.assertEqual(source_map[call_loc].function_name, 'function_with_multiline_call') self.assertEqual(source_map[call_loc].source_code_line, ' return range(') second_arg_loc = origin_info.LineLocation('test_filename', 5) self.assertIn(second_arg_loc, source_map) self.assertEqual(source_map[second_arg_loc].loc.lineno, fn_start + 4) self.assertEqual(source_map[second_arg_loc].loc.filename, module_path) self.assertEqual(source_map[second_arg_loc].function_name, 'function_with_multiline_call') self.assertEqual(source_map[second_arg_loc].source_code_line, ' x + 1,') def test_create_source_map_no_origin_info(self): test_fn = basic_definitions.simple_function node, _ = parser.parse_entity(test_fn, inspect_utils.getfutureimports(test_fn)) # No origin information should result in an empty map. test_fn_lines, _ = tf_inspect.getsourcelines(test_fn) source_map = origin_info.create_source_map(node, '\n'.join(test_fn_lines), test_fn) self.assertEmpty(source_map) def test_resolve(self): source = """ def test_fn(x): '''Docstring.''' return x # comment """ source = textwrap.dedent(source) node = parser.parse(source) origin_info.resolve(node, source, 'test_file', 10, 10) def_origin = anno.getanno(node, anno.Basic.ORIGIN) self.assertEqual(def_origin.loc.filename, 'test_file') self.assertEqual(def_origin.loc.lineno, 10) self.assertEqual(def_origin.loc.col_offset, 10) self.assertEqual(def_origin.source_code_line, 'def test_fn(x):') self.assertIsNone(def_origin.comment) docstring_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN) self.assertEqual(def_origin.loc.filename, 'test_file') self.assertEqual(docstring_origin.loc.lineno, 11) self.assertEqual(docstring_origin.loc.col_offset, 12) self.assertEqual(docstring_origin.source_code_line, " '''Docstring.'''") self.assertIsNone(docstring_origin.comment) ret_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN) self.assertEqual(def_origin.loc.filename, 'test_file') self.assertEqual(ret_origin.loc.lineno, 12) self.assertEqual(ret_origin.loc.col_offset, 12) self.assertEqual(ret_origin.source_code_line, ' return x # comment') self.assertEqual(ret_origin.comment, 'comment') def test_resolve_with_trailing_garbage(self): # This comment will be missed because the tokenizer fails to reach it. 
source = ' lambda: foo([], bar=1)), baz=2)()' clean_source = 'lambda: foo([], bar=1)' node = parser.parse(clean_source).value origin_info.resolve(node, source, 'test_file', 10, 10) def_origin = anno.getanno(node, anno.Basic.ORIGIN) self.assertEqual(def_origin.loc.lineno, 10) self.assertEqual(def_origin.loc.col_offset, 10) self.assertEqual(def_origin.source_code_line, source) self.assertIsNone(def_origin.comment) def test_resolve_entity(self): test_fn = basic_definitions.simple_function node, source = parser.parse_entity( test_fn, inspect_utils.getfutureimports(test_fn)) origin_info.resolve_entity(node, source, test_fn) # The line numbers below should match those in basic_definitions.py fn_start = inspect.getsourcelines(test_fn)[1] def_origin = anno.getanno(node, anno.Basic.ORIGIN) self.assertEqual(def_origin.loc.lineno, fn_start) self.assertEqual(def_origin.loc.col_offset, 0) self.assertEqual(def_origin.source_code_line, 'def simple_function(x):') self.assertIsNone(def_origin.comment) docstring_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN) self.assertEqual(docstring_origin.loc.lineno, fn_start + 1) self.assertEqual(docstring_origin.loc.col_offset, 2) self.assertEqual(docstring_origin.source_code_line, ' """Docstring."""') self.assertIsNone(docstring_origin.comment) ret_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN) self.assertEqual(ret_origin.loc.lineno, fn_start + 2) self.assertEqual(ret_origin.loc.col_offset, 2) self.assertEqual(ret_origin.source_code_line, ' return x # comment') self.assertEqual(ret_origin.comment, 'comment') def test_resolve_entity_nested_function(self): test_fn = basic_definitions.nested_functions node, source = parser.parse_entity( test_fn, inspect_utils.getfutureimports(test_fn)) origin_info.resolve_entity(node, source, test_fn) # The line numbers below should match those in basic_definitions.py fn_start = inspect.getsourcelines(test_fn)[1] inner_def_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN) self.assertEqual(inner_def_origin.loc.lineno, fn_start + 3) self.assertEqual(inner_def_origin.loc.col_offset, 2) self.assertEqual(inner_def_origin.source_code_line, ' def inner_fn(y):') self.assertIsNone(inner_def_origin.comment) inner_ret_origin = anno.getanno(node.body[1].body[0], anno.Basic.ORIGIN) self.assertEqual(inner_ret_origin.loc.lineno, fn_start + 4) self.assertEqual(inner_ret_origin.loc.col_offset, 4) self.assertEqual(inner_ret_origin.source_code_line, ' return y') self.assertIsNone(inner_ret_origin.comment) def test_resolve_entity_indented_block(self): test_fn = basic_definitions.SimpleClass.simple_method node, source = parser.parse_entity(test_fn, inspect_utils.getfutureimports(test_fn)) origin_info.resolve_entity(node, source, test_fn) # The line numbers below should match those in basic_definitions.py fn_start = inspect.getsourcelines(test_fn)[1] def_origin = anno.getanno(node, anno.Basic.ORIGIN) self.assertEqual(def_origin.loc.lineno, fn_start) self.assertEqual(def_origin.loc.col_offset, 2) self.assertEqual(def_origin.source_code_line, 'def simple_method(self):') self.assertIsNone(def_origin.comment) ret_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN) self.assertEqual(ret_origin.loc.lineno, fn_start + 1) self.assertEqual(ret_origin.loc.col_offset, 4) self.assertEqual(ret_origin.source_code_line, ' return self') self.assertIsNone(ret_origin.comment) def test_resolve_entity_decorated_function(self): test_fn = basic_definitions.decorated_function node, source = parser.parse_entity(test_fn, inspect_utils.getfutureimports(test_fn)) 
origin_info.resolve_entity(node, source, test_fn) # The line numbers below should match those in basic_definitions.py fn_start = inspect.getsourcelines(test_fn)[1] def_origin = anno.getanno(node, anno.Basic.ORIGIN) if sys.version_info >= (3, 8): self.assertEqual(def_origin.loc.lineno, fn_start + 2) self.assertEqual(def_origin.source_code_line, 'def decorated_function(x):') else: self.assertEqual(def_origin.loc.lineno, fn_start) self.assertEqual(def_origin.source_code_line, '@basic_decorator') self.assertEqual(def_origin.loc.col_offset, 0) self.assertIsNone(def_origin.comment) if_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN) self.assertEqual(if_origin.loc.lineno, fn_start + 3) self.assertEqual(if_origin.loc.col_offset, 2) self.assertEqual(if_origin.source_code_line, ' if x > 0:') self.assertIsNone(if_origin.comment) ret1_origin = anno.getanno(node.body[0].body[0], anno.Basic.ORIGIN) self.assertEqual(ret1_origin.loc.lineno, fn_start + 4) self.assertEqual(ret1_origin.loc.col_offset, 4) self.assertEqual(ret1_origin.source_code_line, ' return 1') self.assertIsNone(ret1_origin.comment) ret2_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN) self.assertEqual(ret2_origin.loc.lineno, fn_start + 5) self.assertEqual(ret2_origin.loc.col_offset, 2) self.assertEqual(ret2_origin.source_code_line, ' return 2') self.assertIsNone(ret2_origin.comment) if __name__ == '__main__': test.main()
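# ------------------------------------------------------------------------------
# Hedged sketch (standard library only, not TensorFlow code): the origin info
# asserted in the tests above amounts to AST position data (lineno, col_offset)
# plus the matching source line.  The snippet below reproduces that mapping for
# the same toy function used in test_resolve, without any autograph machinery;
# the names _sketch_source and line_origins_sketch are invented here.
# ------------------------------------------------------------------------------
import ast

_sketch_source = textwrap.dedent("""
    def test_fn(x):
      '''Docstring.'''
      return x  # comment
""")


def line_origins_sketch(source):
  """Map the function def and each body statement to (lineno, col_offset, line)."""
  lines = source.split('\n')
  fn = ast.parse(source).body[0]
  return [(n.lineno, n.col_offset, lines[n.lineno - 1]) for n in [fn] + fn.body]

# line_origins_sketch(_sketch_source) yields, in order, the def line, the
# docstring line and the return line, with 1-based line numbers relative to the
# dedented source; these are the same quantities that test_resolve() shifts by
# the caller-supplied line and column offsets when it builds OriginInfo objects.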
# Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes supporting unit and lesson editing.""" __author__ = 'John Orr (jorr@google.com)' import cgi import logging import random import urllib import filer import messages import yaml from common import safe_dom from common import tags from common.schema_fields import FieldRegistry from common.schema_fields import SchemaField from controllers import sites from controllers.utils import ApplicationHandler from controllers.utils import BaseRESTHandler from controllers.utils import XsrfTokenManager from models import courses from models import models as m_models from models import review from models import roles from models import transforms from modules.oeditor import oeditor from tools import verify DRAFT_TEXT = 'Private' PUBLISHED_TEXT = 'Public' # The editor has severe limitations for editing nested lists of objects. First, # it does not allow one to move a lesson from one unit to another. We need a way # of doing that. Second, JSON schema specification does not seem to support a # type-safe array, which has objects of different types. We also want that # badly :). All in all - using generic schema-based object editor for editing # nested arrayable polymorphic attributes is a pain... STATUS_ANNOTATION = oeditor.create_bool_select_annotation( ['properties', 'is_draft'], 'Status', DRAFT_TEXT, PUBLISHED_TEXT, class_name='split-from-main-group') # Allowed matchers. Keys of this dict represent internal keys for the matcher # type, and the value represents the corresponding string that will appear in # the dashboard UI. ALLOWED_MATCHERS_NAMES = {review.PEER_MATCHER: messages.PEER_MATCHER_NAME} # Allowed graders. Keys of this dict represent internal keys for the grader # type, and the value represents the corresponding string that will appear in # the dashboard UI. ALLOWED_GRADERS_NAMES = { courses.AUTO_GRADER: messages.AUTO_GRADER_NAME, courses.HUMAN_GRADER: messages.HUMAN_GRADER_NAME, } class CourseOutlineRights(object): """Manages view/edit rights for course outline.""" @classmethod def can_view(cls, handler): return cls.can_edit(handler) @classmethod def can_edit(cls, handler): return roles.Roles.is_course_admin(handler.app_context) @classmethod def can_delete(cls, handler): return cls.can_edit(handler) @classmethod def can_add(cls, handler): return cls.can_edit(handler) class UnitLessonEditor(ApplicationHandler): """An editor for the unit and lesson titles.""" def get_import_course(self): """Shows setup form for course import.""" template_values = {} template_values['page_title'] = self.format_title('Import Course') template_values['page_title_linked'] = self.format_title( 'Import Course') annotations = ImportCourseRESTHandler.SCHEMA_ANNOTATIONS_DICT() if not annotations: template_values['main_content'] = 'No courses to import from.' 
self.render_page(template_values) return exit_url = self.canonicalize_url('/dashboard') rest_url = self.canonicalize_url(ImportCourseRESTHandler.URI) form_html = oeditor.ObjectEditor.get_html_for( self, ImportCourseRESTHandler.SCHEMA_JSON, annotations, None, rest_url, exit_url, auto_return=True, save_button_caption='Import', required_modules=ImportCourseRESTHandler.REQUIRED_MODULES) template_values = {} template_values['page_title'] = self.format_title('Import Course') template_values['page_description'] = messages.IMPORT_COURSE_DESCRIPTION template_values['main_content'] = form_html self.render_page(template_values) def get_edit_unit_lesson(self): """Shows editor for the list of unit and lesson titles.""" key = self.request.get('key') exit_url = self.canonicalize_url('/dashboard') rest_url = self.canonicalize_url(UnitLessonTitleRESTHandler.URI) form_html = oeditor.ObjectEditor.get_html_for( self, UnitLessonTitleRESTHandler.SCHEMA_JSON, UnitLessonTitleRESTHandler.SCHEMA_ANNOTATIONS_DICT, key, rest_url, exit_url, required_modules=UnitLessonTitleRESTHandler.REQUIRED_MODULES) template_values = {} template_values['page_title'] = self.format_title('Edit Course Outline') template_values[ 'page_description'] = messages.COURSE_OUTLINE_EDITOR_DESCRIPTION template_values['main_content'] = form_html self.render_page(template_values) def post_add_lesson(self): """Adds new lesson to a first unit of the course.""" course = courses.Course(self) first_unit = None for unit in course.get_units(): if unit.type == verify.UNIT_TYPE_UNIT: first_unit = unit break if first_unit: lesson = course.add_lesson(first_unit) course.save() # TODO(psimakov): complete 'edit_lesson' view self.redirect(self.get_action_url( 'edit_lesson', key=lesson.lesson_id, extra_args={'is_newly_created': 1})) else: self.redirect('/dashboard') def post_add_unit(self): """Adds new unit to a course.""" course = courses.Course(self) unit = course.add_unit() course.save() self.redirect(self.get_action_url( 'edit_unit', key=unit.unit_id, extra_args={'is_newly_created': 1})) def post_add_link(self): """Adds new link to a course.""" course = courses.Course(self) link = course.add_link() link.href = '' course.save() self.redirect(self.get_action_url( 'edit_link', key=link.unit_id, extra_args={'is_newly_created': 1})) def post_add_assessment(self): """Adds new assessment to a course.""" course = courses.Course(self) assessment = course.add_assessment() course.save() self.redirect(self.get_action_url( 'edit_assessment', key=assessment.unit_id, extra_args={'is_newly_created': 1})) def _render_edit_form_for( self, rest_handler_cls, title, annotations_dict=None, delete_xsrf_token='delete-unit', page_description=None, extra_js_files=None): """Renders an editor form for a given REST handler class.""" if not annotations_dict: annotations_dict = rest_handler_cls.SCHEMA_ANNOTATIONS_DICT key = self.request.get('key') extra_args = {} if self.request.get('is_newly_created'): extra_args['is_newly_created'] = 1 exit_url = self.canonicalize_url('/dashboard') rest_url = self.canonicalize_url(rest_handler_cls.URI) delete_url = '%s?%s' % ( self.canonicalize_url(rest_handler_cls.URI), urllib.urlencode({ 'key': key, 'xsrf_token': cgi.escape( self.create_xsrf_token(delete_xsrf_token)) })) form_html = oeditor.ObjectEditor.get_html_for( self, rest_handler_cls.SCHEMA_JSON, annotations_dict, key, rest_url, exit_url, extra_args=extra_args, delete_url=delete_url, delete_method='delete', read_only=not filer.is_editable_fs(self.app_context), 
required_modules=rest_handler_cls.REQUIRED_MODULES, extra_js_files=extra_js_files) template_values = {} template_values['page_title'] = self.format_title('Edit %s' % title) if page_description: template_values['page_description'] = page_description template_values['main_content'] = form_html self.render_page(template_values) def get_edit_unit(self): """Shows unit editor.""" self._render_edit_form_for( UnitRESTHandler, 'Unit', page_description=messages.UNIT_EDITOR_DESCRIPTION) def get_edit_link(self): """Shows link editor.""" self._render_edit_form_for( LinkRESTHandler, 'Link', page_description=messages.LINK_EDITOR_DESCRIPTION) def get_edit_assessment(self): """Shows assessment editor.""" self._render_edit_form_for( AssessmentRESTHandler, 'Assessment', page_description=messages.ASSESSMENT_EDITOR_DESCRIPTION, extra_js_files=['assessment_editor_lib.js', 'assessment_editor.js']) def get_edit_lesson(self): """Shows the lesson/activity editor.""" self._render_edit_form_for( LessonRESTHandler, 'Lessons and Activities', annotations_dict=LessonRESTHandler.get_schema_annotations_dict( courses.Course(self).get_units()), delete_xsrf_token='delete-lesson', extra_js_files=LessonRESTHandler.EXTRA_JS_FILES) class CommonUnitRESTHandler(BaseRESTHandler): """A common super class for all unit REST handlers.""" def unit_to_dict(self, unused_unit): """Converts a unit to a dictionary representation.""" raise Exception('Not implemented') def apply_updates( self, unused_unit, unused_updated_unit_dict, unused_errors): """Applies changes to a unit; modifies unit input argument.""" raise Exception('Not implemented') def get(self): """A GET REST method shared by all unit types.""" key = self.request.get('key') if not CourseOutlineRights.can_view(self): transforms.send_json_response( self, 401, 'Access denied.', {'key': key}) return unit = courses.Course(self).find_unit_by_id(key) if not unit: transforms.send_json_response( self, 404, 'Object not found.', {'key': key}) return message = ['Success.'] if self.request.get('is_newly_created'): unit_type = verify.UNIT_TYPE_NAMES[unit.type].lower() message.append( 'New %s has been created and saved.' 
% unit_type) transforms.send_json_response( self, 200, '\n'.join(message), payload_dict=self.unit_to_dict(unit), xsrf_token=XsrfTokenManager.create_xsrf_token('put-unit')) def put(self): """A PUT REST method shared by all unit types.""" request = transforms.loads(self.request.get('request')) key = request.get('key') if not self.assert_xsrf_token_or_fail( request, 'put-unit', {'key': key}): return if not CourseOutlineRights.can_edit(self): transforms.send_json_response( self, 401, 'Access denied.', {'key': key}) return unit = courses.Course(self).find_unit_by_id(key) if not unit: transforms.send_json_response( self, 404, 'Object not found.', {'key': key}) return payload = request.get('payload') updated_unit_dict = transforms.json_to_dict( transforms.loads(payload), self.SCHEMA_DICT) errors = [] self.apply_updates(unit, updated_unit_dict, errors) if not errors: course = courses.Course(self) assert course.update_unit(unit) course.save() transforms.send_json_response(self, 200, 'Saved.') else: transforms.send_json_response(self, 412, '\n'.join(errors)) def delete(self): """Handles REST DELETE verb with JSON payload.""" key = self.request.get('key') if not self.assert_xsrf_token_or_fail( self.request, 'delete-unit', {'key': key}): return if not CourseOutlineRights.can_delete(self): transforms.send_json_response( self, 401, 'Access denied.', {'key': key}) return course = courses.Course(self) unit = course.find_unit_by_id(key) if not unit: transforms.send_json_response( self, 404, 'Object not found.', {'key': key}) return course.delete_unit(unit) course.save() transforms.send_json_response(self, 200, 'Deleted.') class UnitRESTHandler(CommonUnitRESTHandler): """Provides REST API to unit.""" URI = '/rest/course/unit' SCHEMA_JSON = """ { "id": "Unit Entity", "type": "object", "description": "Unit", "properties": { "key" : {"type": "string"}, "type": {"type": "string"}, "title": {"optional": true, "type": "string"}, "is_draft": {"type": "boolean"} } } """ SCHEMA_DICT = transforms.loads(SCHEMA_JSON) SCHEMA_ANNOTATIONS_DICT = [ (['title'], 'Unit'), (['properties', 'key', '_inputex'], { 'label': 'ID', '_type': 'uneditable'}), (['properties', 'type', '_inputex'], { 'label': 'Type', '_type': 'uneditable'}), (['properties', 'title', '_inputex'], {'label': 'Title'}), STATUS_ANNOTATION] REQUIRED_MODULES = [ 'inputex-string', 'inputex-select', 'inputex-uneditable'] def unit_to_dict(self, unit): assert unit.type == 'U' return { 'key': unit.unit_id, 'type': verify.UNIT_TYPE_NAMES[unit.type], 'title': unit.title, 'is_draft': not unit.now_available} def apply_updates(self, unit, updated_unit_dict, unused_errors): unit.title = updated_unit_dict.get('title') unit.now_available = not updated_unit_dict.get('is_draft') class LinkRESTHandler(CommonUnitRESTHandler): """Provides REST API to link.""" URI = '/rest/course/link' SCHEMA_JSON = """ { "id": "Link Entity", "type": "object", "description": "Link", "properties": { "key" : {"type": "string"}, "type": {"type": "string"}, "title": {"optional": true, "type": "string"}, "url": {"optional": true, "type": "string"}, "is_draft": {"type": "boolean"} } } """ SCHEMA_DICT = transforms.loads(SCHEMA_JSON) SCHEMA_ANNOTATIONS_DICT = [ (['title'], 'Link'), (['properties', 'key', '_inputex'], { 'label': 'ID', '_type': 'uneditable'}), (['properties', 'type', '_inputex'], { 'label': 'Type', '_type': 'uneditable'}), (['properties', 'title', '_inputex'], {'label': 'Title'}), (['properties', 'url', '_inputex'], { 'label': 'URL', 'description': messages.LINK_EDITOR_URL_DESCRIPTION}), 
STATUS_ANNOTATION] REQUIRED_MODULES = [ 'inputex-string', 'inputex-select', 'inputex-uneditable'] def unit_to_dict(self, unit): assert unit.type == 'O' return { 'key': unit.unit_id, 'type': verify.UNIT_TYPE_NAMES[unit.type], 'title': unit.title, 'url': unit.href, 'is_draft': not unit.now_available} def apply_updates(self, unit, updated_unit_dict, unused_errors): unit.title = updated_unit_dict.get('title') unit.href = updated_unit_dict.get('url') unit.now_available = not updated_unit_dict.get('is_draft') class ImportCourseRESTHandler(CommonUnitRESTHandler): """Provides REST API to course import.""" URI = '/rest/course/import' SCHEMA_JSON = """ { "id": "Import Course Entity", "type": "object", "description": "Import Course", "properties": { "course" : {"type": "string"} } } """ SCHEMA_DICT = transforms.loads(SCHEMA_JSON) REQUIRED_MODULES = [ 'inputex-string', 'inputex-select', 'inputex-uneditable'] @classmethod def _get_course_list(cls): # Make a list of courses user has the rights to. course_list = [] for acourse in sites.get_all_courses(): if not roles.Roles.is_course_admin(acourse): continue if acourse == sites.get_course_for_current_request(): continue course_list.append({ 'value': acourse.raw, 'label': cgi.escape(acourse.get_title())}) return course_list @classmethod def SCHEMA_ANNOTATIONS_DICT(cls): # pylint: disable-msg=g-bad-name """Schema annotations are dynamic and include a list of courses.""" course_list = cls._get_course_list() if not course_list: return None # Format annotations. return [ (['title'], 'Import Course'), ( ['properties', 'course', '_inputex'], { 'label': 'Available Courses', '_type': 'select', 'choices': course_list})] def get(self): """Handles REST GET verb and returns an object as JSON payload.""" if not CourseOutlineRights.can_view(self): transforms.send_json_response(self, 401, 'Access denied.', {}) return first_course_in_dropdown = self._get_course_list()[0]['value'] transforms.send_json_response( self, 200, None, payload_dict={'course': first_course_in_dropdown}, xsrf_token=XsrfTokenManager.create_xsrf_token( 'import-course')) def put(self): """Handles REST PUT verb with JSON payload.""" request = transforms.loads(self.request.get('request')) if not self.assert_xsrf_token_or_fail( request, 'import-course', {'key': None}): return if not CourseOutlineRights.can_edit(self): transforms.send_json_response(self, 401, 'Access denied.', {}) return payload = request.get('payload') course_raw = transforms.json_to_dict( transforms.loads(payload), self.SCHEMA_DICT)['course'] source = None for acourse in sites.get_all_courses(): if acourse.raw == course_raw: source = acourse break if not source: transforms.send_json_response( self, 404, 'Object not found.', {'raw': course_raw}) return course = courses.Course(self) errors = [] try: course.import_from(source, errors) except Exception as e: # pylint: disable-msg=broad-except logging.exception(e) errors.append('Import failed: %s' % e) if errors: transforms.send_json_response(self, 412, '\n'.join(errors)) return course.save() transforms.send_json_response(self, 200, 'Imported.') def workflow_key(key): return 'workflow:%s' % key def create_assessment_registry(): """Create the registry for course properties.""" reg = FieldRegistry('Assessment Entity', description='Assessment') # Course level settings. 
course_opts = reg.add_sub_registry('assessment', 'Assessment Config') course_opts.add_property(SchemaField( 'key', 'ID', 'string', editable=False, extra_schema_dict_values={'className': 'inputEx-Field keyHolder'})) course_opts.add_property( SchemaField('type', 'Type', 'string', editable=False)) course_opts.add_property( SchemaField('title', 'Title', 'string', optional=True)) course_opts.add_property( SchemaField('weight', 'Weight', 'string', optional=True)) course_opts.add_property(SchemaField( 'content', 'Assessment Content', 'text', optional=True, description=str(messages.ASSESSMENT_CONTENT_DESCRIPTION), extra_schema_dict_values={'className': 'inputEx-Field content'})) course_opts.add_property(SchemaField( 'html_content', 'Assessment Content (HTML)', 'html', optional=True, extra_schema_dict_values={ 'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value, 'excludedCustomTags': tags.EditorBlacklists.ASSESSMENT_SCOPE, 'className': 'inputEx-Field html-content'})) course_opts.add_property(SchemaField( 'html_check_answers', '"Check Answers" Buttons', 'boolean', optional=True, extra_schema_dict_values={ 'className': 'inputEx-Field assessment-editor-check-answers'})) course_opts.add_property( SchemaField(workflow_key(courses.SUBMISSION_DUE_DATE_KEY), 'Submission Due Date', 'string', optional=True, description=str(messages.DUE_DATE_FORMAT_DESCRIPTION))) course_opts.add_property( SchemaField(workflow_key(courses.GRADER_KEY), 'Grading Method', 'string', select_data=ALLOWED_GRADERS_NAMES.items())) course_opts.add_property( SchemaField('is_draft', 'Status', 'boolean', select_data=[(True, DRAFT_TEXT), (False, PUBLISHED_TEXT)], extra_schema_dict_values={ 'className': 'split-from-main-group'})) review_opts = reg.add_sub_registry( 'review_opts', 'Review Config', description=str(messages.ASSESSMENT_DETAILS_DESCRIPTION)) if len(ALLOWED_MATCHERS_NAMES) > 1: review_opts.add_property( SchemaField(workflow_key(courses.MATCHER_KEY), 'Review Matcher', 'string', optional=True, select_data=ALLOWED_MATCHERS_NAMES.items())) review_opts.add_property( SchemaField( 'review_form', 'Reviewer Feedback Form', 'text', optional=True, description=str(messages.REVIEWER_FEEDBACK_FORM_DESCRIPTION), extra_schema_dict_values={ 'className': 'inputEx-Field review-form'})) review_opts.add_property(SchemaField( 'html_review_form', 'Reviewer Feedback Form (HTML)', 'html', optional=True, extra_schema_dict_values={ 'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value, 'excludedCustomTags': tags.EditorBlacklists.ASSESSMENT_SCOPE, 'className': 'inputEx-Field html-review-form'})) review_opts.add_property( SchemaField( workflow_key(courses.REVIEW_DUE_DATE_KEY), 'Review Due Date', 'string', optional=True, description=str(messages.REVIEW_DUE_DATE_FORMAT_DESCRIPTION))) review_opts.add_property( SchemaField(workflow_key(courses.REVIEW_MIN_COUNT_KEY), 'Review Min Count', 'integer', optional=True, description=str(messages.REVIEW_MIN_COUNT_DESCRIPTION))) review_opts.add_property( SchemaField(workflow_key(courses.REVIEW_WINDOW_MINS_KEY), 'Review Window Timeout', 'integer', optional=True, description=str(messages.REVIEW_TIMEOUT_IN_MINUTES))) return reg class AssessmentRESTHandler(CommonUnitRESTHandler): """Provides REST API to assessment.""" URI = '/rest/course/assessment' REG = create_assessment_registry() SCHEMA_JSON = REG.get_json_schema() SCHEMA_DICT = REG.get_json_schema_dict() SCHEMA_ANNOTATIONS_DICT = REG.get_schema_dict() REQUIRED_MODULES = [ 'gcb-rte', 'inputex-select', 'inputex-string', 'inputex-textarea', 'inputex-uneditable', 
'inputex-integer', 'inputex-hidden', 'inputex-checkbox'] def _get_assessment_path(self, unit): return self.app_context.fs.impl.physical_to_logical( courses.Course(self).get_assessment_filename(unit.unit_id)) def _get_review_form_path(self, unit): return self.app_context.fs.impl.physical_to_logical( courses.Course(self).get_review_form_filename(unit.unit_id)) def unit_to_dict(self, unit): """Assemble a dict with the unit data fields.""" assert unit.type == 'A' path = self._get_assessment_path(unit) fs = self.app_context.fs if fs.isfile(path): content = fs.get(path) else: content = '' review_form_path = self._get_review_form_path(unit) if review_form_path and fs.isfile(review_form_path): review_form = fs.get(review_form_path) else: review_form = '' workflow = unit.workflow if workflow.get_submission_due_date(): submission_due_date = workflow.get_submission_due_date().strftime( courses.ISO_8601_DATE_FORMAT) else: submission_due_date = '' if workflow.get_review_due_date(): review_due_date = workflow.get_review_due_date().strftime( courses.ISO_8601_DATE_FORMAT) else: review_due_date = '' return { 'assessment': { 'key': unit.unit_id, 'type': verify.UNIT_TYPE_NAMES[unit.type], 'title': unit.title, 'weight': str(unit.weight if hasattr(unit, 'weight') else 0), 'content': content, 'html_content': unit.html_content or '', 'html_check_answers': unit.html_check_answers, 'is_draft': not unit.now_available, workflow_key(courses.SUBMISSION_DUE_DATE_KEY): ( submission_due_date), workflow_key(courses.GRADER_KEY): workflow.get_grader(), }, 'review_opts': { workflow_key(courses.MATCHER_KEY): workflow.get_matcher(), workflow_key(courses.REVIEW_DUE_DATE_KEY): review_due_date, workflow_key(courses.REVIEW_MIN_COUNT_KEY): ( workflow.get_review_min_count()), workflow_key(courses.REVIEW_WINDOW_MINS_KEY): ( workflow.get_review_window_mins()), 'review_form': review_form, 'html_review_form': unit.html_review_form or '' } } def apply_updates(self, unit, updated_unit_dict, errors): """Store the updated assessment.""" entity_dict = {} AssessmentRESTHandler.REG.convert_json_to_entity( updated_unit_dict, entity_dict) unit.title = entity_dict.get('title') try: unit.weight = int(entity_dict.get('weight')) if unit.weight < 0: errors.append('The weight must be a non-negative integer.') except ValueError: errors.append('The weight must be an integer.') unit.now_available = not entity_dict.get('is_draft') course = courses.Course(self) content = entity_dict.get('content') if content: course.set_assessment_content( unit, entity_dict.get('content'), errors=errors) unit.html_content = entity_dict.get('html_content') unit.html_check_answers = entity_dict.get('html_check_answers') workflow_dict = entity_dict.get('workflow') if len(ALLOWED_MATCHERS_NAMES) == 1: workflow_dict[courses.MATCHER_KEY] = ( ALLOWED_MATCHERS_NAMES.keys()[0]) unit.workflow_yaml = yaml.safe_dump(workflow_dict) unit.workflow.validate(errors=errors) # Only save the review form if the assessment needs human grading. 
if not errors: if course.needs_human_grader(unit): review_form = entity_dict.get('review_form') if review_form: course.set_review_form( unit, review_form, errors=errors) unit.html_review_form = entity_dict.get('html_review_form') elif entity_dict.get('review_form'): errors.append( 'Review forms for auto-graded assessments should be empty.') class UnitLessonTitleRESTHandler(BaseRESTHandler): """Provides REST API to unit and lesson titles.""" URI = '/rest/course/outline' SCHEMA_JSON = """ { "type": "object", "description": "Course Outline", "properties": { "outline": { "type": "array", "items": { "type": "object", "properties": { "id": {"type": "string"}, "title": {"type": "string"}, "lessons": { "type": "array", "items": { "type": "object", "properties": { "id": {"type": "string"}, "title": {"type": "string"} } } } } } } } } """ SCHEMA_DICT = transforms.loads(SCHEMA_JSON) SCHEMA_ANNOTATIONS_DICT = [ (['title'], 'Course Outline'), (['_inputex'], {'className': 'organizer'}), (['properties', 'outline', '_inputex'], { 'sortable': 'true', 'label': ''}), ([ 'properties', 'outline', 'items', 'properties', 'title', '_inputex'], { '_type': 'uneditable', 'label': ''}), (['properties', 'outline', 'items', 'properties', 'id', '_inputex'], { '_type': 'hidden'}), (['properties', 'outline', 'items', 'properties', 'lessons', '_inputex'], { 'sortable': 'true', 'label': '', 'listAddLabel': 'Add a new lesson', 'listRemoveLabel': 'Delete'}), (['properties', 'outline', 'items', 'properties', 'lessons', 'items', 'properties', 'title', '_inputex'], { '_type': 'uneditable', 'label': ''}), (['properties', 'outline', 'items', 'properties', 'lessons', 'items', 'properties', 'id', '_inputex'], { '_type': 'hidden'}) ] REQUIRED_MODULES = [ 'inputex-hidden', 'inputex-list', 'inputex-string', 'inputex-uneditable'] def get(self): """Handles REST GET verb and returns an object as JSON payload.""" if not CourseOutlineRights.can_view(self): transforms.send_json_response(self, 401, 'Access denied.', {}) return course = courses.Course(self) outline_data = [] for unit in course.get_units(): lesson_data = [] for lesson in course.get_lessons(unit.unit_id): lesson_data.append({ 'title': lesson.title, 'id': lesson.lesson_id}) unit_title = unit.title if verify.UNIT_TYPE_UNIT == unit.type: unit_title = 'Unit: %s' % unit.title outline_data.append({ 'title': unit_title, 'id': unit.unit_id, 'lessons': lesson_data}) transforms.send_json_response( self, 200, None, payload_dict={'outline': outline_data}, xsrf_token=XsrfTokenManager.create_xsrf_token( 'unit-lesson-reorder')) def put(self): """Handles REST PUT verb with JSON payload.""" request = transforms.loads(self.request.get('request')) if not self.assert_xsrf_token_or_fail( request, 'unit-lesson-reorder', {'key': None}): return if not CourseOutlineRights.can_edit(self): transforms.send_json_response(self, 401, 'Access denied.', {}) return payload = request.get('payload') payload_dict = transforms.json_to_dict( transforms.loads(payload), self.SCHEMA_DICT) course = courses.Course(self) course.reorder_units(payload_dict['outline']) course.save() transforms.send_json_response(self, 200, 'Saved.') class LessonRESTHandler(BaseRESTHandler): """Provides REST API to handle lessons and activities.""" URI = '/rest/course/lesson' # Note GcbRte relies on the structure of this schema. Do not change without # checking the dependency. 
SCHEMA_JSON = """ { "id": "Lesson Entity", "type": "object", "description": "Lesson", "properties": { "key" : {"type": "string"}, "title" : {"type": "string"}, "unit_id": {"type": "string"}, "video" : {"type": "string", "optional": true}, "scored": {"type": "string"}, "objectives" : { "type": "string", "format": "html", "optional": true}, "notes" : {"type": "string", "optional": true}, "activity_title" : {"type": "string", "optional": true}, "activity_listed" : {"type": "boolean", "optional": true}, "activity": {"type": "string", "format": "text", "optional": true}, "is_draft": {"type": "boolean"} } } """ SCHEMA_DICT = transforms.loads(SCHEMA_JSON) REQUIRED_MODULES = [ 'inputex-string', 'gcb-rte', 'inputex-select', 'inputex-textarea', 'inputex-uneditable', 'inputex-checkbox'] EXTRA_JS_FILES = ['lesson_editor_lib.js', 'lesson_editor.js'] @classmethod def get_schema_annotations_dict(cls, units): unit_list = [] for unit in units: if unit.type == 'U': unit_list.append({ 'label': cgi.escape( 'Unit %s - %s' % (unit.index, unit.title)), 'value': unit.unit_id}) return [ (['title'], 'Lesson'), (['properties', 'key', '_inputex'], { 'label': 'ID', '_type': 'uneditable', 'className': 'inputEx-Field keyHolder'}), (['properties', 'title', '_inputex'], {'label': 'Title'}), (['properties', 'unit_id', '_inputex'], { 'label': 'Parent Unit', '_type': 'select', 'choices': unit_list}), (['properties', 'scored', '_inputex'], { '_type': 'select', 'choices': [ {'label': 'Questions are scored', 'value': 'scored'}, { 'label': 'Questions only give feedback', 'value': 'not_scored'}], 'label': 'Scored', 'description': messages.LESSON_SCORED_DESCRIPTION}), # TODO(sll): The internal 'objectives' property should also be # renamed. (['properties', 'objectives', '_inputex'], { 'label': 'Lesson Body', 'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value, 'description': messages.LESSON_OBJECTIVES_DESCRIPTION}), (['properties', 'video', '_inputex'], { 'label': 'Video ID', 'description': messages.LESSON_VIDEO_ID_DESCRIPTION}), (['properties', 'notes', '_inputex'], { 'label': 'Notes', 'description': messages.LESSON_NOTES_DESCRIPTION}), (['properties', 'activity_title', '_inputex'], { 'label': 'Activity Title', 'description': messages.LESSON_ACTIVITY_TITLE_DESCRIPTION}), (['properties', 'activity_listed', '_inputex'], { 'label': 'Activity Listed', 'description': messages.LESSON_ACTIVITY_LISTED_DESCRIPTION}), (['properties', 'activity', '_inputex'], { 'label': 'Activity', 'description': str(messages.LESSON_ACTIVITY_DESCRIPTION), 'className': 'inputEx-Field activityHolder'}), STATUS_ANNOTATION] def get(self): """Handles GET REST verb and returns lesson object as JSON payload.""" if not CourseOutlineRights.can_view(self): transforms.send_json_response(self, 401, 'Access denied.', {}) return key = self.request.get('key') course = courses.Course(self) lesson = course.find_lesson_by_id(None, key) assert lesson fs = self.app_context.fs path = fs.impl.physical_to_logical(course.get_activity_filename( lesson.unit_id, lesson.lesson_id)) if lesson.has_activity and fs.isfile(path): activity = fs.get(path) else: activity = '' payload_dict = { 'key': key, 'title': lesson.title, 'unit_id': lesson.unit_id, 'scored': 'scored' if lesson.scored else 'not_scored', 'objectives': lesson.objectives, 'video': lesson.video, 'notes': lesson.notes, 'activity_title': lesson.activity_title, 'activity_listed': lesson.activity_listed, 'activity': activity, 'is_draft': not lesson.now_available } message = ['Success.'] if self.request.get('is_newly_created'): 
message.append('New lesson has been created and saved.') transforms.send_json_response( self, 200, '\n'.join(message), payload_dict=payload_dict, xsrf_token=XsrfTokenManager.create_xsrf_token('lesson-edit')) def put(self): """Handles PUT REST verb to save lesson and associated activity.""" request = transforms.loads(self.request.get('request')) key = request.get('key') if not self.assert_xsrf_token_or_fail( request, 'lesson-edit', {'key': key}): return if not CourseOutlineRights.can_edit(self): transforms.send_json_response( self, 401, 'Access denied.', {'key': key}) return course = courses.Course(self) lesson = course.find_lesson_by_id(None, key) if not lesson: transforms.send_json_response( self, 404, 'Object not found.', {'key': key}) return payload = request.get('payload') updates_dict = transforms.json_to_dict( transforms.loads(payload), self.SCHEMA_DICT) lesson.title = updates_dict['title'] lesson.unit_id = updates_dict['unit_id'] lesson.scored = (updates_dict['scored'] == 'scored') lesson.objectives = updates_dict['objectives'] lesson.video = updates_dict['video'] lesson.notes = updates_dict['notes'] lesson.activity_title = updates_dict['activity_title'] lesson.activity_listed = updates_dict['activity_listed'] lesson.now_available = not updates_dict['is_draft'] activity = updates_dict.get('activity', '').strip() errors = [] if activity: lesson.has_activity = True course.set_activity_content(lesson, activity, errors=errors) else: lesson.has_activity = False fs = self.app_context.fs path = fs.impl.physical_to_logical(course.get_activity_filename( lesson.unit_id, lesson.lesson_id)) if fs.isfile(path): fs.delete(path) if not errors: assert course.update_lesson(lesson) course.save() transforms.send_json_response(self, 200, 'Saved.') else: transforms.send_json_response(self, 412, '\n'.join(errors)) def delete(self): """Handles REST DELETE verb with JSON payload.""" key = self.request.get('key') if not self.assert_xsrf_token_or_fail( self.request, 'delete-lesson', {'key': key}): return if not CourseOutlineRights.can_delete(self): transforms.send_json_response( self, 401, 'Access denied.', {'key': key}) return course = courses.Course(self) lesson = course.find_lesson_by_id(None, key) if not lesson: transforms.send_json_response( self, 404, 'Object not found.', {'key': key}) return assert course.delete_lesson(lesson) course.save() transforms.send_json_response(self, 200, 'Deleted.') def generate_instanceid(): chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789' length = 12 return ''.join([random.choice(chars) for unused_i in xrange(length)]) class CollisionError(Exception): """Exception raised to show that a collision in a namespace has occurred.""" class ImportActivityRESTHandler(BaseRESTHandler): """REST handler for requests to import an activity into the lesson body.""" URI = '/rest/course/lesson/activity' VERSION = '1.5' def put(self): """Handle REST PUT instruction to import an assignment.""" request = transforms.loads(self.request.get('request')) key = request.get('key') if not self.assert_xsrf_token_or_fail(request, 'lesson-edit', {}): return if not CourseOutlineRights.can_edit(self): transforms.send_json_response( self, 401, 'Access denied.', {'key': key}) return text = request.get('text') try: content, noverify_text = verify.convert_javascript_to_python( text, 'activity') activity = verify.evaluate_python_expression_from_text( content, 'activity', verify.Activity().scope, noverify_text) except Exception: # pylint: disable-msg=broad-except 
transforms.send_json_response( self, 412, 'Unable to parse activity.') return try: verify.Verifier().verify_activity_instance(activity, 'none') except verify.SchemaException: transforms.send_json_response( self, 412, 'Unable to validate activity.') return self.course = courses.Course(self) self.lesson = self.course.find_lesson_by_id(None, key) self.unit = self.course.find_unit_by_id(self.lesson.unit_id) self.question_number = 0 self.question_descriptions = set( [q.description for q in m_models.QuestionDAO.get_all()]) self.question_group_descriptions = set( [qg.description for qg in m_models.QuestionGroupDAO.get_all()]) lesson_content = [] try: for item in activity['activity']: if isinstance(item, basestring): lesson_content.append(item) else: question_tag = self.import_question(item) lesson_content.append(question_tag) self.question_number += 1 except CollisionError: transforms.send_json_response( self, 412, ( 'This activity has already been imported. Remove duplicate ' 'imported questions from the question bank in order to ' 're-import.')) return except Exception as ex: transforms.send_json_response( self, 412, 'Unable to convert: %s' % ex) return transforms.send_json_response(self, 200, 'OK.', payload_dict={ 'content': '\n'.join(lesson_content) }) def _get_question_description(self): return ( 'Imported from unit "%s", lesson "%s" (question #%s)' % ( self.unit.title, self.lesson.title, self.question_number + 1)) def _insert_question(self, question_dict, question_type): question = m_models.QuestionDTO(None, question_dict) question.type = question_type return m_models.QuestionDAO.save(question) def _insert_question_group(self, question_group_dict): question_group = m_models.QuestionGroupDTO(None, question_group_dict) return m_models.QuestionGroupDAO.save(question_group) def import_question(self, item): question_type = item['questionType'] if question_type == 'multiple choice': question_dict = self.import_multiple_choice(item) quid = self._insert_question( question_dict, m_models.QuestionDTO.MULTIPLE_CHOICE) return '<question quid="%s" instanceid="%s"></question>' % ( quid, generate_instanceid()) elif question_type == 'multiple choice group': question_group_dict = self.import_multiple_choice_group(item) qgid = self._insert_question_group(question_group_dict) return ( '<question-group qgid="%s" instanceid="%s">' '</question-group>') % ( qgid, generate_instanceid()) elif question_type == 'freetext': question_dict = self.import_freetext(item) quid = self._insert_question( question_dict, m_models.QuestionDTO.SHORT_ANSWER) return '<question quid="%s" instanceid="%s"></question>' % ( quid, generate_instanceid()) else: raise ValueError('Unknown question type: %s' % question_type) def import_multiple_choice(self, orig_question): description = self._get_question_description() if description in self.question_descriptions: raise CollisionError() return { 'version': self.VERSION, 'description': description, 'question': '', 'multiple_selections': False, 'choices': [ { 'text': choice[0], 'score': 1.0 if choice[1].value else 0.0, 'feedback': choice[2] } for choice in orig_question['choices']]} def import_multiple_choice_group(self, mc_choice_group): """Import a 'multiple choice group' as a question group.""" description = self._get_question_description() if description in self.question_group_descriptions: raise CollisionError() question_group_dict = { 'version': self.VERSION, 'description': description} question_list = [] for index, question in enumerate(mc_choice_group['questionsList']): question_dict = 
self.import_multiple_choice_group_question( question, index) question = m_models.QuestionDTO(None, question_dict) question.type = m_models.QuestionDTO.MULTIPLE_CHOICE question_list.append(question) quid_list = m_models.QuestionDAO.save_all(question_list) question_group_dict['items'] = [{ 'question': str(quid), 'weight': 1.0} for quid in quid_list] return question_group_dict def import_multiple_choice_group_question(self, orig_question, index): """Import the questions from a group as individual questions.""" # TODO(jorr): Handle allCorrectOutput and someCorrectOutput description = ( 'Imported from unit "%s", lesson "%s" (question #%s, part #%s)' % ( self.unit.title, self.lesson.title, self.question_number + 1, index + 1)) if description in self.question_descriptions: raise CollisionError() correct_index = orig_question['correctIndex'] multiple_selections = not isinstance(correct_index, int) if multiple_selections: partial = 1.0 / len(correct_index) choices = [{ 'text': text, 'score': partial if i in correct_index else -1.0 } for i, text in enumerate(orig_question['choices'])] else: choices = [{ 'text': text, 'score': 1.0 if i == correct_index else 0.0 } for i, text in enumerate(orig_question['choices'])] return { 'version': self.VERSION, 'description': description, 'question': orig_question.get('questionHTML') or '', 'multiple_selections': multiple_selections, 'choices': choices} def import_freetext(self, orig_question): description = self._get_question_description() if description in self.question_descriptions: raise CollisionError() return { 'version': self.VERSION, 'description': description, 'question': '', 'hint': orig_question['showAnswerOutput'], 'graders': [{ 'score': 1.0, 'matcher': 'regex', 'response': orig_question['correctAnswerRegex'].value, 'feedback': orig_question.get('correctAnswerOutput') }], 'defaultFeedback': orig_question.get('incorrectAnswerOutput')} class ExportAssessmentRESTHandler(BaseRESTHandler): """REST handler for requests to export an activity into new format.""" URI = '/rest/course/asessment/export' VERSION = '1.5' def put(self): """Handle the PUT verb to export an assessment.""" request = transforms.loads(self.request.get('request')) key = request.get('key') if not CourseOutlineRights.can_edit(self): transforms.send_json_response( self, 401, 'Access denied.', {'key': key}) return if not self.assert_xsrf_token_or_fail( request, 'put-unit', {'key': key}): return raw_assessment_dict = transforms.json_to_dict( request.get('payload'), AssessmentRESTHandler.SCHEMA_DICT) entity_dict = {} AssessmentRESTHandler.REG.convert_json_to_entity( raw_assessment_dict, entity_dict) course = courses.Course(self) self.unit = course.find_unit_by_id(key) self.question_descriptions = set( [q.description for q in m_models.QuestionDAO.get_all()]) # Import all the assessment context except the questions new_unit = course.add_assessment() errors = [] new_unit.title = 'Exported from %s ' % entity_dict.get('title') try: new_unit.weight = int(entity_dict.get('weight')) if new_unit.weight < 0: errors.append('The weight must be a non-negative integer.') except ValueError: errors.append('The weight must be an integer.') new_unit.now_available = not entity_dict.get('is_draft') workflow_dict = entity_dict.get('workflow') if len(ALLOWED_MATCHERS_NAMES) == 1: workflow_dict[courses.MATCHER_KEY] = ( ALLOWED_MATCHERS_NAMES.keys()[0]) new_unit.workflow_yaml = yaml.safe_dump(workflow_dict) new_unit.workflow.validate(errors=errors) if errors: transforms.send_json_response(self, 412, '\n'.join(errors)) 
            return

        assessment_dict = self.get_assessment_dict(entity_dict.get('content'))
        if assessment_dict is None:
            return
        if assessment_dict.get('checkAnswers'):
            new_unit.html_check_answers = assessment_dict['checkAnswers'].value

        # Import the questions in the assessment and the review questionnaire
        html_content = []
        html_review_form = []

        if assessment_dict.get('preamble'):
            html_content.append(assessment_dict['preamble'])

        # prepare all the dtos for the questions in the assignment content
        question_dtos = self.get_question_dtos(
            assessment_dict,
            'Imported from assessment "%s" (question #%s)')
        if question_dtos is None:
            return

        # prepare the questions for the review questionnaire, if necessary
        review_dtos = []
        if course.needs_human_grader(new_unit):
            review_str = entity_dict.get('review_form')
            review_dict = self.get_assessment_dict(review_str)
            if review_dict is None:
                return
            if review_dict.get('preamble'):
                html_review_form.append(review_dict['preamble'])

            review_dtos = self.get_question_dtos(
                review_dict,
                'Imported from assessment "%s" (review question #%s)')
            if review_dtos is None:
                return

        # batch submit the questions and split out their resulting id's
        all_dtos = question_dtos + review_dtos
        all_ids = m_models.QuestionDAO.save_all(all_dtos)
        question_ids = all_ids[:len(question_dtos)]
        review_ids = all_ids[len(question_dtos):]

        # insert question tags for the assessment content
        for quid in question_ids:
            html_content.append(
                str(safe_dom.Element(
                    'question', quid=str(quid),
                    instanceid=generate_instanceid())))
        new_unit.html_content = '\n'.join(html_content)

        # insert question tags for the review questionnaire
        for quid in review_ids:
            html_review_form.append(
                str(safe_dom.Element(
                    'question', quid=str(quid),
                    instanceid=generate_instanceid())))
        new_unit.html_review_form = '\n'.join(html_review_form)

        course.save()

        transforms.send_json_response(
            self, 200,
            'The assessment has been exported to "%s".' % new_unit.title,
            payload_dict={'key': key})

    def get_assessment_dict(self, assessment_content):
        """Validate the assessment script and return it as a Python dict."""
        try:
            content, noverify_text = verify.convert_javascript_to_python(
                assessment_content, 'assessment')
            assessment = verify.evaluate_python_expression_from_text(
                content, 'assessment', verify.Assessment().scope,
                noverify_text)
        except Exception:  # pylint: disable-msg=broad-except
            transforms.send_json_response(
                self, 412, 'Unable to parse assessment.')
            return None

        try:
            verify.Verifier().verify_assessment_instance(assessment, 'none')
        except verify.SchemaException:
            transforms.send_json_response(
                self, 412, 'Unable to validate assessment.')
            return None

        return assessment['assessment']

    def get_question_dtos(self, assessment_dict, description_template):
        """Convert the assessment into a list of QuestionDTO's."""
        question_dtos = []
        try:
            for i, question in enumerate(assessment_dict['questionsList']):
                description = description_template % (
                    self.unit.title, (i + 1))
                if description in self.question_descriptions:
                    raise CollisionError()

                question_dto = self.import_question(question)
                question_dto.dict['description'] = description
                question_dtos.append(question_dto)
        except CollisionError:
            transforms.send_json_response(
                self, 412, (
                    'This assessment has already been imported. Remove '
                    'duplicate imported questions from the question bank in '
                    'order to re-import.'))
            return None
        except Exception as ex:
            transforms.send_json_response(
                self, 412, 'Unable to convert: %s' % ex)
            return None

        return question_dtos

    def import_question(self, question):
        """Convert a single question into a QuestionDTO."""
        if 'choices' in question:
            question_dict = self.import_multiple_choice_question(question)
            question_type = m_models.QuestionDTO.MULTIPLE_CHOICE
        elif 'correctAnswerNumeric' in question:
            question_dict = self.import_short_answer_question(
                question.get('questionHTML'), 'numeric',
                question.get('correctAnswerNumeric'))
            question_type = m_models.QuestionDTO.SHORT_ANSWER
        elif 'correctAnswerString' in question:
            question_dict = self.import_short_answer_question(
                question.get('questionHTML'), 'case_insensitive',
                question.get('correctAnswerString'))
            question_type = m_models.QuestionDTO.SHORT_ANSWER
        elif 'correctAnswerRegex' in question:
            question_dict = self.import_short_answer_question(
                question.get('questionHTML'), 'regex',
                question.get('correctAnswerRegex').value)
            question_type = m_models.QuestionDTO.SHORT_ANSWER
        else:
            raise ValueError('Unknown question type')

        question_dto = m_models.QuestionDTO(None, question_dict)
        question_dto.type = question_type
        return question_dto

    def import_multiple_choice_question(self, question):
        """Assemble the dict for a multiple choice question."""
        question_dict = {
            'version': self.VERSION,
            'question': question.get('questionHTML') or '',
            'multiple_selections': False
        }
        choices = []
        for choice in question.get('choices'):
            if isinstance(choice, basestring):
                text = choice
                score = 0.0
            else:
                text = choice.value
                score = 1.0
            choices.append({
                'text': text,
                'score': score
            })
        question_dict['choices'] = choices
        return question_dict

    def import_short_answer_question(self, question_html, matcher, response):
        return {
            'version': self.VERSION,
            'question': question_html or '',
            'graders': [{
                'score': 1.0,
                'matcher': matcher,
                'response': response,
            }]
        }
#
# CORE
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Tom Goff <thomas.goff@boeing.com>
#
'''
vnodeclient.py: implementation of the VnodeClient class for issuing commands
over a control channel to the vnoded process running in a network namespace.
The control channel can be accessed via calls to the vcmd Python module or
by invoking the vcmd shell command.
'''

import os, stat, sys

from core.constants import *

USE_VCMD_MODULE = True

if USE_VCMD_MODULE:
    import vcmd
else:
    import subprocess

VCMD = os.path.join(CORE_SBIN_DIR, "vcmd")


class VnodeClient(object):
    def __init__(self, name, ctrlchnlname):
        self.name = name
        self.ctrlchnlname = ctrlchnlname
        if USE_VCMD_MODULE:
            self.cmdchnl = vcmd.VCmd(self.ctrlchnlname)
        else:
            self.cmdchnl = None
        self._addr = {}

    def warn(self, msg):
        print("%s: %s" % (self.name, msg), file=sys.stderr)

    def connected(self):
        if USE_VCMD_MODULE:
            return self.cmdchnl.connected()
        else:
            return True

    def cmd(self, args, wait = True):
        '''
        Execute a command on a node and return the status (return code).
        '''
        if USE_VCMD_MODULE:
            if not self.cmdchnl.connected():
                raise ValueError("self.cmdchnl not connected")
            tmp = self.cmdchnl.qcmd(args)
            if not wait:
                return tmp
            tmp = tmp.wait()
        else:
            if wait:
                mode = os.P_WAIT
            else:
                mode = os.P_NOWAIT
            tmp = os.spawnlp(mode, VCMD, VCMD, "-c", self.ctrlchnlname,
                             "-q", "--", *args)
            if not wait:
                return tmp
        if tmp:
            self.warn("cmd exited with status %s: %s" % (tmp, str(args)))
        return tmp

    def cmdresult(self, args):
        '''
        Execute a command on a node and return a tuple containing the exit
        status and result string. stderr output is folded into the stdout
        result string.
        '''
        cmdid, cmdin, cmdout, cmderr = self.popen(args)
        result = cmdout.read()
        result += cmderr.read()
        cmdin.close()
        cmdout.close()
        cmderr.close()
        status = cmdid.wait()
        return (status, result)

    def popen(self, args):
        if USE_VCMD_MODULE:
            if not self.cmdchnl.connected():
                raise ValueError("self.cmdchnl not connected")
            return self.cmdchnl.popen(args)
        else:
            cmd = [VCMD, "-c", self.ctrlchnlname, "--"]
            cmd.extend(args)
            tmp = subprocess.Popen(cmd, stdin = subprocess.PIPE,
                                   stdout = subprocess.PIPE,
                                   stderr = subprocess.PIPE)
            return tmp, tmp.stdin, tmp.stdout, tmp.stderr

    def icmd(self, args):
        return os.spawnlp(os.P_WAIT, VCMD, VCMD, "-c", self.ctrlchnlname,
                          "--", *args)

    def redircmd(self, infd, outfd, errfd, args, wait = True):
        '''
        Execute a command on a node with standard input, output, and error
        redirected according to the given file descriptors.
        '''
        if not USE_VCMD_MODULE:
            raise NotImplementedError
        if not self.cmdchnl.connected():
            raise ValueError("self.cmdchnl not connected")
        tmp = self.cmdchnl.redircmd(infd, outfd, errfd, args)
        if not wait:
            return tmp
        tmp = tmp.wait()
        if tmp:
            self.warn("cmd exited with status %s: %s" % (tmp, str(args)))
        return tmp

    def term(self, sh = "/bin/sh"):
        return os.spawnlp(os.P_NOWAIT, "xterm", "xterm", "-ut",
                          "-title", self.name, "-e", VCMD, "-c",
                          self.ctrlchnlname, "--", sh)

    def termcmdstring(self, sh = "/bin/sh"):
        return "%s -c %s -- %s" % (VCMD, self.ctrlchnlname, sh)

    def shcmd(self, cmdstr, sh = "/bin/sh"):
        return self.cmd([sh, "-c", cmdstr])

    def getaddr(self, ifname, rescan = False):
        if ifname in self._addr and not rescan:
            return self._addr[ifname]
        tmp = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
        cmd = [IP_BIN, "addr", "show", "dev", ifname]
        cmdid, cmdin, cmdout, cmderr = self.popen(cmd)
        cmdin.close()
        for line in cmdout:
            line = line.strip().split()
            if line[0] == "link/ether":
                tmp["ether"].append(line[1])
            elif line[0] == "inet":
                tmp["inet"].append(line[1])
            elif line[0] == "inet6":
                if line[3] == "global":
                    tmp["inet6"].append(line[1])
                elif line[3] == "link":
                    tmp["inet6link"].append(line[1])
                else:
                    self.warn("unknown scope: %s" % line[3])
            else:
                pass
        err = cmderr.read()
        cmdout.close()
        cmderr.close()
        status = cmdid.wait()
        if status:
            self.warn("nonzero exit status (%s) for cmd: %s" % (status, cmd))
        if err:
            self.warn("error output: %s" % err)
        self._addr[ifname] = tmp
        return tmp

    def netifstats(self, ifname = None):
        stats = {}
        cmd = ["cat", "/proc/net/dev"]
        cmdid, cmdin, cmdout, cmderr = self.popen(cmd)
        cmdin.close()
        # ignore first line
        cmdout.readline()
        # second line has count names
        tmp = cmdout.readline().strip().split("|")
        rxkeys = tmp[1].split()
        txkeys = tmp[2].split()
        for line in cmdout:
            line = line.strip().split()
            devname, tmp = line[0].split(":")
            if tmp:
                line.insert(1, tmp)
            stats[devname] = {"rx": {}, "tx": {}}
            field = 1
            for count in rxkeys:
                stats[devname]["rx"][count] = int(line[field])
                field += 1
            for count in txkeys:
                stats[devname]["tx"][count] = int(line[field])
                field += 1
        err = cmderr.read()
        cmdout.close()
        cmderr.close()
        status = cmdid.wait()
        if status:
            self.warn("nonzero exit status (%s) for cmd: %s" % (status, cmd))
        if err:
            self.warn("error output: %s" % err)
        if ifname is not None:
            return stats[ifname]
        else:
            return stats


def createclients(sessiondir, clientcls = VnodeClient,
                  cmdchnlfilterfunc = None):
    direntries = [os.path.join(sessiondir, x) for x in os.listdir(sessiondir)]
    cmdchnls = [x for x in direntries if stat.S_ISSOCK(os.stat(x).st_mode)]
    if cmdchnlfilterfunc:
        cmdchnls = list(filter(cmdchnlfilterfunc, cmdchnls))
    cmdchnls.sort()
    return [clientcls(os.path.basename(x), x) for x in cmdchnls]


def createremoteclients(sessiondir, clientcls = VnodeClient,
                        filterfunc = None):
    '''
    Creates remote VnodeClients for nodes emulated on other machines. The
    session.Broker writes a n1.conf/server file containing the server's info.
    '''
    direntries = [os.path.join(sessiondir, x) for x in os.listdir(sessiondir)]
    nodedirs = [x for x in direntries if stat.S_ISDIR(os.stat(x).st_mode)]
    nodedirs = [x for x in nodedirs
                if os.path.exists(os.path.join(x, "server"))]
    if filterfunc:
        nodedirs = list(filter(filterfunc, nodedirs))
    nodedirs.sort()
    return [clientcls(x) for x in nodedirs]
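

# Hedged usage sketch (not part of the original module): one plausible way to
# combine createclients() and VnodeClient.cmdresult() to run a command on every
# node found in a CORE session directory. The session path below is a
# hypothetical example, not a path the module itself defines.
if __name__ == "__main__":
    sessiondir = "/tmp/pycore.12345"  # hypothetical CORE session directory
    for client in createclients(sessiondir):
        if not client.connected():
            client.warn("control channel not connected; skipping")
            continue
        # cmdresult() returns (exit status, combined stdout+stderr text)
        status, output = client.cmdresult(["hostname"])
        print("%s (exit %s): %s" % (client.name, status, output.strip()))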
from django.test import TestCase, Client from django.http import HttpRequest, Http404 from django.contrib.auth.models import User from wagtail.wagtailcore.models import Page, Site from wagtail.tests.models import EventPage class TestRouting(TestCase): fixtures = ['test.json'] def test_find_site_for_request(self): default_site = Site.objects.get(is_default_site=True) events_page = Page.objects.get(url_path='/home/events/') events_site = Site.objects.create(hostname='events.example.com', root_page=events_page) # requests without a Host: header should be directed to the default site request = HttpRequest() request.path = '/' self.assertEqual(Site.find_for_request(request), default_site) # requests with a known Host: header should be directed to the specific site request = HttpRequest() request.path = '/' request.META['HTTP_HOST'] = 'events.example.com' self.assertEqual(Site.find_for_request(request), events_site) # requests with an unrecognised Host: header should be directed to the default site request = HttpRequest() request.path = '/' request.META['HTTP_HOST'] = 'unknown.example.com' self.assertEqual(Site.find_for_request(request), default_site) def test_urls(self): default_site = Site.objects.get(is_default_site=True) homepage = Page.objects.get(url_path='/home/') christmas_page = Page.objects.get(url_path='/home/events/christmas/') # Basic installation only has one site configured, so page.url will return local URLs self.assertEqual(homepage.full_url, 'http://localhost/') self.assertEqual(homepage.url, '/') self.assertEqual(homepage.relative_url(default_site), '/') self.assertEqual(christmas_page.full_url, 'http://localhost/events/christmas/') self.assertEqual(christmas_page.url, '/events/christmas/') self.assertEqual(christmas_page.relative_url(default_site), '/events/christmas/') def test_urls_with_multiple_sites(self): events_page = Page.objects.get(url_path='/home/events/') events_site = Site.objects.create(hostname='events.example.com', root_page=events_page) default_site = Site.objects.get(is_default_site=True) homepage = Page.objects.get(url_path='/home/') christmas_page = Page.objects.get(url_path='/home/events/christmas/') # with multiple sites, page.url will return full URLs to ensure that # they work across sites self.assertEqual(homepage.full_url, 'http://localhost/') self.assertEqual(homepage.url, 'http://localhost/') self.assertEqual(homepage.relative_url(default_site), '/') self.assertEqual(homepage.relative_url(events_site), 'http://localhost/') self.assertEqual(christmas_page.full_url, 'http://events.example.com/christmas/') self.assertEqual(christmas_page.url, 'http://events.example.com/christmas/') self.assertEqual(christmas_page.relative_url(default_site), 'http://events.example.com/christmas/') self.assertEqual(christmas_page.relative_url(events_site), '/christmas/') def test_request_routing(self): homepage = Page.objects.get(url_path='/home/') christmas_page = EventPage.objects.get(url_path='/home/events/christmas/') request = HttpRequest() request.path = '/events/christmas/' response = homepage.route(request, ['events', 'christmas']) self.assertEqual(response.status_code, 200) self.assertEqual(response.context_data['self'], christmas_page) used_template = response.resolve_template(response.template_name) self.assertEqual(used_template.name, 'tests/event_page.html') def test_route_to_unknown_page_returns_404(self): homepage = Page.objects.get(url_path='/home/') request = HttpRequest() request.path = '/events/quinquagesima/' with self.assertRaises(Http404): 
homepage.route(request, ['events', 'quinquagesima']) def test_route_to_unpublished_page_returns_404(self): homepage = Page.objects.get(url_path='/home/') request = HttpRequest() request.path = '/events/tentative-unpublished-event/' with self.assertRaises(Http404): homepage.route(request, ['events', 'tentative-unpublished-event']) class TestServeView(TestCase): fixtures = ['test.json'] def setUp(self): # Explicitly clear the cache of site root paths. Normally this would be kept # in sync by the Site.save logic, but this is bypassed when the database is # rolled back between tests using transactions. from django.core.cache import cache cache.delete('wagtail_site_root_paths') def test_serve(self): response = self.client.get('/events/christmas/') self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, 'tests/event_page.html') christmas_page = EventPage.objects.get(url_path='/home/events/christmas/') self.assertEqual(response.context['self'], christmas_page) self.assertContains(response, '<h1>Christmas</h1>') self.assertContains(response, '<h2>Event</h2>') def test_serve_unknown_page_returns_404(self): response = self.client.get('/events/quinquagesima/') self.assertEqual(response.status_code, 404) def test_serve_unpublished_page_returns_404(self): response = self.client.get('/events/tentative-unpublished-event/') self.assertEqual(response.status_code, 404) def test_serve_with_multiple_sites(self): events_page = Page.objects.get(url_path='/home/events/') Site.objects.create(hostname='events.example.com', root_page=events_page) response = self.client.get('/christmas/', HTTP_HOST='events.example.com') self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, 'tests/event_page.html') christmas_page = EventPage.objects.get(url_path='/home/events/christmas/') self.assertEqual(response.context['self'], christmas_page) self.assertContains(response, '<h1>Christmas</h1>') self.assertContains(response, '<h2>Event</h2>') # same request to the default host should return a 404 c = Client() response = c.get('/christmas/', HTTP_HOST='localhost') self.assertEqual(response.status_code, 404) def test_serve_with_custom_context(self): response = self.client.get('/events/') self.assertEqual(response.status_code, 200) # should render the whole page self.assertContains(response, '<h1>Events</h1>') # response should contain data from the custom 'events' context variable self.assertContains(response, '<a href="/events/christmas/">Christmas</a>') def test_ajax_response(self): response = self.client.get('/events/', HTTP_X_REQUESTED_WITH='XMLHttpRequest') self.assertEqual(response.status_code, 200) # should only render the content of includes/event_listing.html, not the whole page self.assertNotContains(response, '<h1>Events</h1>') self.assertContains(response, '<a href="/events/christmas/">Christmas</a>') class TestPageUrlTags(TestCase): fixtures = ['test.json'] def test_pageurl_tag(self): response = self.client.get('/events/') self.assertEqual(response.status_code, 200) self.assertContains(response, '<a href="/events/christmas/">Christmas</a>') def test_slugurl_tag(self): response = self.client.get('/events/christmas/') self.assertEqual(response.status_code, 200) self.assertContains(response, '<a href="/events/">Back to events index</a>') class TestPagePermission(TestCase): fixtures = ['test.json'] def test_nonpublisher_page_permissions(self): event_editor = User.objects.get(username='eventeditor') homepage = Page.objects.get(url_path='/home/') 
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/') unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/') someone_elses_event_page = EventPage.objects.get(url_path='/home/events/someone-elses-event/') homepage_perms = homepage.permissions_for_user(event_editor) christmas_page_perms = christmas_page.permissions_for_user(event_editor) unpub_perms = unpublished_event_page.permissions_for_user(event_editor) someone_elses_event_perms = someone_elses_event_page.permissions_for_user(event_editor) self.assertFalse(homepage_perms.can_add_subpage()) self.assertTrue(christmas_page_perms.can_add_subpage()) self.assertTrue(unpub_perms.can_add_subpage()) self.assertTrue(someone_elses_event_perms.can_add_subpage()) self.assertFalse(homepage_perms.can_edit()) self.assertTrue(christmas_page_perms.can_edit()) self.assertTrue(unpub_perms.can_edit()) self.assertFalse(someone_elses_event_perms.can_edit()) # basic 'add' permission doesn't allow editing pages owned by someone else self.assertFalse(homepage_perms.can_delete()) self.assertFalse(christmas_page_perms.can_delete()) # cannot delete because it is published self.assertTrue(unpub_perms.can_delete()) self.assertFalse(someone_elses_event_perms.can_delete()) self.assertFalse(homepage_perms.can_publish()) self.assertFalse(christmas_page_perms.can_publish()) self.assertFalse(unpub_perms.can_publish()) self.assertFalse(homepage_perms.can_unpublish()) self.assertFalse(christmas_page_perms.can_unpublish()) self.assertFalse(unpub_perms.can_unpublish()) self.assertFalse(homepage_perms.can_publish_subpage()) self.assertFalse(christmas_page_perms.can_publish_subpage()) self.assertFalse(unpub_perms.can_publish_subpage()) self.assertFalse(homepage_perms.can_reorder_children()) self.assertFalse(christmas_page_perms.can_reorder_children()) self.assertFalse(unpub_perms.can_reorder_children()) self.assertFalse(homepage_perms.can_move()) self.assertFalse(christmas_page_perms.can_move()) # cannot move because this would involve unpublishing from its current location self.assertTrue(unpub_perms.can_move()) self.assertFalse(someone_elses_event_perms.can_move()) self.assertFalse(christmas_page_perms.can_move_to(unpublished_event_page)) # cannot move because this would involve unpublishing from its current location self.assertTrue(unpub_perms.can_move_to(christmas_page)) self.assertFalse(unpub_perms.can_move_to(homepage)) # no permission to create pages at destination self.assertFalse(unpub_perms.can_move_to(unpublished_event_page)) # cannot make page a child of itself def test_publisher_page_permissions(self): event_moderator = User.objects.get(username='eventmoderator') homepage = Page.objects.get(url_path='/home/') christmas_page = EventPage.objects.get(url_path='/home/events/christmas/') unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/') homepage_perms = homepage.permissions_for_user(event_moderator) christmas_page_perms = christmas_page.permissions_for_user(event_moderator) unpub_perms = unpublished_event_page.permissions_for_user(event_moderator) self.assertFalse(homepage_perms.can_add_subpage()) self.assertTrue(christmas_page_perms.can_add_subpage()) self.assertTrue(unpub_perms.can_add_subpage()) self.assertFalse(homepage_perms.can_edit()) self.assertTrue(christmas_page_perms.can_edit()) self.assertTrue(unpub_perms.can_edit()) self.assertFalse(homepage_perms.can_delete()) self.assertTrue(christmas_page_perms.can_delete()) # cannot delete because it is 
published self.assertTrue(unpub_perms.can_delete()) self.assertFalse(homepage_perms.can_publish()) self.assertTrue(christmas_page_perms.can_publish()) self.assertTrue(unpub_perms.can_publish()) self.assertFalse(homepage_perms.can_unpublish()) self.assertTrue(christmas_page_perms.can_unpublish()) self.assertFalse(unpub_perms.can_unpublish()) # cannot unpublish a page that isn't published self.assertFalse(homepage_perms.can_publish_subpage()) self.assertTrue(christmas_page_perms.can_publish_subpage()) self.assertTrue(unpub_perms.can_publish_subpage()) self.assertFalse(homepage_perms.can_reorder_children()) self.assertTrue(christmas_page_perms.can_reorder_children()) self.assertTrue(unpub_perms.can_reorder_children()) self.assertFalse(homepage_perms.can_move()) self.assertTrue(christmas_page_perms.can_move()) self.assertTrue(unpub_perms.can_move()) self.assertTrue(christmas_page_perms.can_move_to(unpublished_event_page)) self.assertTrue(unpub_perms.can_move_to(christmas_page)) self.assertFalse(unpub_perms.can_move_to(homepage)) # no permission to create pages at destination self.assertFalse(unpub_perms.can_move_to(unpublished_event_page)) # cannot make page a child of itself def test_inactive_user_has_no_permissions(self): user = User.objects.get(username='inactiveuser') christmas_page = EventPage.objects.get(url_path='/home/events/christmas/') unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/') christmas_page_perms = christmas_page.permissions_for_user(user) unpub_perms = unpublished_event_page.permissions_for_user(user) self.assertFalse(unpub_perms.can_add_subpage()) self.assertFalse(unpub_perms.can_edit()) self.assertFalse(unpub_perms.can_delete()) self.assertFalse(unpub_perms.can_publish()) self.assertFalse(christmas_page_perms.can_unpublish()) self.assertFalse(unpub_perms.can_publish_subpage()) self.assertFalse(unpub_perms.can_reorder_children()) self.assertFalse(unpub_perms.can_move()) self.assertFalse(unpub_perms.can_move_to(christmas_page)) def test_superuser_has_full_permissions(self): user = User.objects.get(username='superuser') homepage = Page.objects.get(url_path='/home/') root = Page.objects.get(url_path='/') unpublished_event_page = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/') homepage_perms = homepage.permissions_for_user(user) root_perms = root.permissions_for_user(user) unpub_perms = unpublished_event_page.permissions_for_user(user) self.assertTrue(homepage_perms.can_add_subpage()) self.assertTrue(root_perms.can_add_subpage()) self.assertTrue(homepage_perms.can_edit()) self.assertFalse(root_perms.can_edit()) # root is not a real editable page, even to superusers self.assertTrue(homepage_perms.can_delete()) self.assertFalse(root_perms.can_delete()) self.assertTrue(homepage_perms.can_publish()) self.assertFalse(root_perms.can_publish()) self.assertTrue(homepage_perms.can_unpublish()) self.assertFalse(root_perms.can_unpublish()) self.assertFalse(unpub_perms.can_unpublish()) self.assertTrue(homepage_perms.can_publish_subpage()) self.assertTrue(root_perms.can_publish_subpage()) self.assertTrue(homepage_perms.can_reorder_children()) self.assertTrue(root_perms.can_reorder_children()) self.assertTrue(homepage_perms.can_move()) self.assertFalse(root_perms.can_move()) self.assertTrue(homepage_perms.can_move_to(root)) self.assertFalse(homepage_perms.can_move_to(unpublished_event_page)) class TestPageQuerySet(TestCase): fixtures = ['test.json'] def test_live(self): pages = Page.objects.live() # All pages 
must be live for page in pages: self.assertTrue(page.live) # Check that the homepage is in the results homepage = Page.objects.get(url_path='/home/') self.assertTrue(pages.filter(id=homepage.id).exists()) def test_not_live(self): pages = Page.objects.not_live() # All pages must not be live for page in pages: self.assertFalse(page.live) # Check that "someone elses event" is in the results event = Page.objects.get(url_path='/home/events/someone-elses-event/') self.assertTrue(pages.filter(id=event.id).exists()) def test_page(self): homepage = Page.objects.get(url_path='/home/') pages = Page.objects.page(homepage) # Should only select the homepage self.assertEqual(pages.count(), 1) self.assertEqual(pages.first(), homepage) def test_not_page(self): homepage = Page.objects.get(url_path='/home/') pages = Page.objects.not_page(homepage) # Should select everything except for the homepage self.assertEqual(pages.count(), Page.objects.all().count() - 1) for page in pages: self.assertNotEqual(page, homepage) def test_descendant_of(self): events_index = Page.objects.get(url_path='/home/events/') pages = Page.objects.descendant_of(events_index) # Check that all pages descend from events index for page in pages: self.assertTrue(page.get_ancestors().filter(id=events_index.id).exists()) def test_descendant_of_inclusive(self): events_index = Page.objects.get(url_path='/home/events/') pages = Page.objects.descendant_of(events_index, inclusive=True) # Check that all pages descend from events index, includes event index for page in pages: self.assertTrue(page == events_index or page.get_ancestors().filter(id=events_index.id).exists()) # Check that event index was included self.assertTrue(pages.filter(id=events_index.id).exists()) def test_not_descendant_of(self): homepage = Page.objects.get(url_path='/home/') events_index = Page.objects.get(url_path='/home/events/') pages = Page.objects.not_descendant_of(events_index) # Check that no pages descend from events_index for page in pages: self.assertFalse(page.get_ancestors().filter(id=events_index.id).exists()) # As this is not inclusive, events index should be in the results self.assertTrue(pages.filter(id=events_index.id).exists()) def test_not_descendant_of_inclusive(self): homepage = Page.objects.get(url_path='/home/') events_index = Page.objects.get(url_path='/home/events/') pages = Page.objects.not_descendant_of(events_index, inclusive=True) # Check that all pages descend from homepage but not events index for page in pages: self.assertFalse(page.get_ancestors().filter(id=events_index.id).exists()) # As this is inclusive, events index should not be in the results self.assertFalse(pages.filter(id=events_index.id).exists()) def test_child_of(self): homepage = Page.objects.get(url_path='/home/') pages = Page.objects.child_of(homepage) # Check that all pages are children of homepage for page in pages: self.assertEqual(page.get_parent(), homepage) def test_not_child_of(self): events_index = Page.objects.get(url_path='/home/events/') pages = Page.objects.not_child_of(events_index) # Check that all pages are not children of events_index for page in pages: self.assertNotEqual(page.get_parent(), events_index) def test_ancestor_of(self): root_page = Page.objects.get(id=1) homepage = Page.objects.get(url_path='/home/') events_index = Page.objects.get(url_path='/home/events/') pages = Page.objects.ancestor_of(events_index) self.assertEqual(pages.count(), 2) self.assertEqual(pages[0], root_page) self.assertEqual(pages[1], homepage) def test_ancestor_of_inclusive(self): 
root_page = Page.objects.get(id=1) homepage = Page.objects.get(url_path='/home/') events_index = Page.objects.get(url_path='/home/events/') pages = Page.objects.ancestor_of(events_index, inclusive=True) self.assertEqual(pages.count(), 3) self.assertEqual(pages[0], root_page) self.assertEqual(pages[1], homepage) self.assertEqual(pages[2], events_index) def test_not_ancestor_of(self): root_page = Page.objects.get(id=1) homepage = Page.objects.get(url_path='/home/') events_index = Page.objects.get(url_path='/home/events/') pages = Page.objects.not_ancestor_of(events_index) # Test that none of the ancestors are in pages for page in pages: self.assertNotEqual(page, root_page) self.assertNotEqual(page, homepage) # Test that events index is in pages self.assertTrue(pages.filter(id=events_index.id).exists()) def test_not_ancestor_of_inclusive(self): root_page = Page.objects.get(id=1) homepage = Page.objects.get(url_path='/home/') events_index = Page.objects.get(url_path='/home/events/') pages = Page.objects.not_ancestor_of(events_index, inclusive=True) # Test that none of the ancestors or the events_index are in pages for page in pages: self.assertNotEqual(page, root_page) self.assertNotEqual(page, homepage) self.assertNotEqual(page, events_index) def test_parent_of(self): homepage = Page.objects.get(url_path='/home/') events_index = Page.objects.get(url_path='/home/events/') pages = Page.objects.parent_of(events_index) # Pages must only contain homepage self.assertEqual(pages.count(), 1) self.assertEqual(pages[0], homepage) def test_not_parent_of(self): homepage = Page.objects.get(url_path='/home/') events_index = Page.objects.get(url_path='/home/events/') pages = Page.objects.not_parent_of(events_index) # Pages must not contain homepage for page in pages: self.assertNotEqual(page, homepage) # Test that events index is in pages self.assertTrue(pages.filter(id=events_index.id).exists()) def test_sibling_of(self): events_index = Page.objects.get(url_path='/home/events/') event = Page.objects.get(url_path='/home/events/christmas/') pages = Page.objects.sibling_of(event) # Check that all pages are children of events_index for page in pages: self.assertEqual(page.get_parent(), events_index) # Check that the event is not included self.assertFalse(pages.filter(id=event.id).exists()) def test_sibling_of_inclusive(self): events_index = Page.objects.get(url_path='/home/events/') event = Page.objects.get(url_path='/home/events/christmas/') pages = Page.objects.sibling_of(event, inclusive=True) # Check that all pages are children of events_index for page in pages: self.assertEqual(page.get_parent(), events_index) # Check that the event is included self.assertTrue(pages.filter(id=event.id).exists()) def test_not_sibling_of(self): events_index = Page.objects.get(url_path='/home/events/') event = Page.objects.get(url_path='/home/events/christmas/') pages = Page.objects.not_sibling_of(event) # Check that all pages are not children of events_index for page in pages: if page != event: self.assertNotEqual(page.get_parent(), events_index) # Check that the event is included self.assertTrue(pages.filter(id=event.id).exists()) # Test that events index is in pages self.assertTrue(pages.filter(id=events_index.id).exists()) def test_not_sibling_of_inclusive(self): events_index = Page.objects.get(url_path='/home/events/') event = Page.objects.get(url_path='/home/events/christmas/') pages = Page.objects.not_sibling_of(event, inclusive=True) # Check that all pages are not children of events_index for page in pages: 
self.assertNotEqual(page.get_parent(), events_index) # Check that the event is not included self.assertFalse(pages.filter(id=event.id).exists()) # Test that events index is in pages self.assertTrue(pages.filter(id=events_index.id).exists()) def test_type(self): pages = Page.objects.type(EventPage) # Check that all objects are EventPages for page in pages: self.assertIsInstance(page.specific, EventPage) # Check that "someone elses event" is in the results event = Page.objects.get(url_path='/home/events/someone-elses-event/') self.assertTrue(pages.filter(id=event.id).exists()) def test_not_type(self): pages = Page.objects.not_type(EventPage) # Check that no objects are EventPages for page in pages: self.assertNotIsInstance(page.specific, EventPage) # Check that the homepage is in the results homepage = Page.objects.get(url_path='/home/') self.assertTrue(pages.filter(id=homepage.id).exists())
######################################################################## # # Kevin Wecht 4 November 2014 # # Bay Area Bicycle Share (BABS) Open Data Challenge # ######################################################################## # # This file begins a Qt window to support interactive # visualization of the BABS data. # # OUTLINE # functionname - description # ######################################################################## # Import modules required by these functions import sys from PyQt4 import QtGui, QtCore from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar import matplotlib.pyplot as plt import matplotlib.cm as cm import numpy as np import random import BabsFunctions import BabsClasses import pandas as pd #import pdb import itertools ######################################################################## # Define class to hold our main interactive window class MainWindow(QtGui.QWidget): """Python Class to hold our main interactive GUI window. Inherits methods from QtGui.QWidget. METHODS __init__ - initializes the window class initUI - initializes and draws the GUI grid initPlot - places Matplotlib canvas on the grid initOptions - places widgets on the grid to manipulate the plot""" def __init__(self): """Initialize the window class and its parent class.""" super(MainWindow, self).__init__() # This function initializes the GUI. self.initUI() def initUI(self): """Initialize the GUI.""" # Create grid layout on which the GUI will be based self.initGrid() # ---- Initialize different types of widgets on the grid self.initOptions() # Options for selecting what to plot self.initPlot() # After initializing the options #self.initOther() # Other things... #self.quitbutton = QtGui.QPushButton('Button',self) #self.grid.addWidget(self.quitbutton,self.gridParams.nrow-1,self.gridParams.ncol-1) # Set layout after all widgets have been placed on the grid self.setLayout(self.grid) # Set size of window and window title self.setGeometry(100, 100, 1450, 900) self.setWindowTitle('Review') # Set the focus of the GUI on the matplotlib window for click events self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus ) self.canvas.setFocus() # Show window! self.show() def initGrid(self): """Set up the grid layout on which the GUI will be based.""" # Create Grid self.grid = QtGui.QGridLayout() # Assign Parameters self.gridParams = BabsClasses.GridParams() # Set even scaling of columns when resizing the window for ii in range(self.gridParams.ncol): self.grid.setColumnStretch(ii,1) # Set default grid spacing self.grid.setSpacing(self.gridParams.spacing) # Set column width for the final five columns #for ii in range(self.optcol0,self.optcol1+1): # self. 
def initPlot(self): """Initialize plot in window.""" # Create figure and canvas to display plot self.figure = plt.figure() self.canvas = FigureCanvas(self.figure) self.toolbar= NavigationToolbar(self.canvas,self.canvas) # Place canvas on the grid self.grid.addWidget(self.canvas,self.gridParams.plotrow0, self.gridParams.plotcol0, self.gridParams.plotnrow, self.gridParams.plotncol) # Create axis on which to plot everything self.ax = self.figure.add_subplot(111) self.ax.hold(False) # Create the initial plot from the buttons already selected # during initOptions() self.updateplot(None) def initOptions(self): """Initialize widgets to control matplotlib plot.""" # PlotOptions object to hold the options for this plot self.PlotOptions = BabsClasses.PlotOptions() # Initialize BarPlot options self.initMainType() # X-axis: timeseries, histogram, ... self.initMainGroup() # number of rides, duration, ... self.initBinGroup() # how to bin the data self.initTimeBin() # If binning by time, what time step to use # Initialize Bar plot divisions self.initDivisions() # weather, region, customer type, ... # Initialize Line Overplot options self.initOverplots() # weather, number of rides, duration, ... # Initialize Data Filtering options self.initFilters() # date range, day of week, hour of day, region, weather, ... # Initialize Refresh and Quit Buttons self.initButtons() def EnableAll(self): """ Enable all widgets in the GUI. Called before disabling select widgets based on current selections. """ # Enable Main Type and Main Group widgets self.mainType.setEnabled(True) self.mainGroup.setEnabled(True) # Enable Time options and plot refresh button self.timeGroup.setEnabled(True) self.timeText.setEnabled(True) # Enable division radio buttons for button in self.divisionGroup.buttons(): button.setEnabled(True) # Enable all check boxes in the overplot options # Enable all check boxes in filtering options def DisableOptions(self,NewOptions): """ Disable some widgets based on currently selected widgets. For example, when showing number of rides vs. day of week, disable time averaging and disable the option of dividing bars by day of the week. """ # Begin by enabling all widgets self.EnableAll() # If plotting a timeseries, disable the bin options other than # Time (other) if NewOptions.typeid==0: NewOptions.binid = 0 # Force binid to be Time (other) self.binGroup.setCurrentIndex(0) # If plotting a histogram, disable Time (other) by # forcing that selection to change to Number of Rides if NewOptions.typeid==1: if NewOptions.binid==0: NewOptions.binid=1 self.binGroup.setCurrentIndex(1) # If binning data by day of week, hour of day, or region # don't set time manually if NewOptions.binid>=2: self.timeGroup.setEnabled(False) self.timeText.setEnabled(False) # If plotting by Hour of Day, day of week, or region, don't divide # the bar by hour of day, day of week, or region names = ['Day of Week','Hour of Day','Region'] inds = [2,3,4] for name,ind in zip(names,inds): if NewOptions.binid==ind: for button in self.divisionGroup.buttons(): if str(button.objectName())==name: button.setEnabled(False) # Disable the "Other" radio button in the divisions section for button in self.divisionGroup.buttons(): if str(button.objectName())=='Other': button.setEnabled(False) def setLabels(self,NewOptions): """ Sets plot title, axis labels, and tick marks for the current plot. 
""" # Set plot title = title0 + ' of ' + title1 title0 = ['Timeseries', 'Histogram'] title1 = ['Number of Rides', 'Ride Duration [minutes]', 'Ride Distance [km]'] title2 = ['Time (other)', 'Number of Rides', 'Day of Week', 'Hour of Day', 'Region'] title = (title0[NewOptions.typeid] + ' of ' + title1[NewOptions.barid] + ' binned by ' + title2[NewOptions.binid]) # Set y-axis label if NewOptions.typeid==1: ylabel = 'Number of Occurances' else: ylabel = title1[NewOptions.barid] if NewOptions.overtype!=[]: ylabel2 = NewOptions.overtype[0] # Set x-axis label if NewOptions.typeid==0: xlabel='Date' if NewOptions.typeid==1: xlabel=title2[NewOptions.binid] # Set x-tick labels xticks = [] # Set y-tick labels # Place all labels on the plot plt.title(title) self.ax.set_ylabel(ylabel) self.ax.set_xlabel(xlabel) if hasattr(self,'ax2'): self.ax2.set_ylabel(ylabel2) #plt.yticks() #plt.xticks() def initMainType(self): """Initialize widgets to control the type of bar plot""" # Group to hold the drop down list options buttons self.mainType = QtGui.QComboBox() # Label above the Bar Plot options top_label = QtGui.QLabel('Interacting with BABS data') top_label.setAlignment(QtCore.Qt.AlignCenter) thisfont = top_label.font() thisfont.setPointSize(24) top_label.setFont(thisfont) show_label = QtGui.QLabel('Show: ') show_label.setAlignment(QtCore.Qt.AlignCenter) # Radio Buttons button_names = ['Timeseries', 'Histogram']#, ] buttonlist = [] # Add each name to the drop down list self.mainType.addItems(button_names) # Upon item selection, call the method updateplot self.connect(self.mainType, QtCore.SIGNAL('activated(QString)'), self.updateplot) # Place widgets on grid rowoffset = self.gridParams.maintype_row0 self.grid.addWidget( top_label, self.gridParams.optrow0+0+rowoffset, self.gridParams.optcol0+self.gridParams.nfiltercol*(0), 1, self.gridParams.optncol ) self.grid.addWidget( show_label, self.gridParams.optrow0+2+rowoffset, self.gridParams.optcol0+self.gridParams.nfiltercol*(0)+1, 1, 2 ) self.grid.addWidget( self.mainType, self.gridParams.optrow0+2+rowoffset, self.gridParams.optcol0+self.gridParams.nfiltercol*(1), 1, self.gridParams.nfiltercol ) def initMainGroup(self): # Group to hold the drop down list options buttons self.mainGroup = QtGui.QComboBox() # Label above the Bar Plot options of_label = QtGui.QLabel(' of ') of_label.setAlignment(QtCore.Qt.AlignCenter) # Drop down list options button_names = ['Number Rides', 'Duration', 'Distance'] buttonlist = [] # Add each name to the drop down list self.mainGroup.addItems(button_names) # Upon item selection, call the method updateplot self.connect(self.mainGroup, QtCore.SIGNAL('activated(QString)'), self.updateplot) # Place widgets on grid rowoffset = self.gridParams.maingroup_row0 coloffset = 2 self.grid.addWidget( of_label, self.gridParams.optrow0+1+rowoffset, self.gridParams.optcol0+self.gridParams.nfiltercol*(0+coloffset), 1, self.gridParams.nfiltercol-1 ) self.grid.addWidget( self.mainGroup, self.gridParams.optrow0+1+rowoffset, self.gridParams.optcol0+self.gridParams.nfiltercol*(1+coloffset)-1, 1, self.gridParams.nfiltercol ) def initBinGroup(self): """Create drop down list of ways to bin data for histogram.""" # Group to hold the bin drop down list self.binGroup = QtGui.QComboBox() # Message to the left of the text box bin_label = QtGui.QLabel('Bin by: ') bin_label.setAlignment(QtCore.Qt.AlignCenter) # Drop down list options button_names = ['Time (other)', 'Number of Rides', 'Day of Week', 'Hour of Day', 'Region'] buttonlist = [] # Add each name to the drop 
down list self.binGroup.addItems(button_names) # Upon selection, call the method updateplot self.connect(self.binGroup, QtCore.SIGNAL('activated(QString)'), self.updateplot) # Place widgets on grid rowoffset = self.gridParams.bingroup_row0 self.grid.addWidget( bin_label, self.gridParams.optrow0+1+rowoffset, self.gridParams.optcol0+self.gridParams.nfiltercol*(0)+1, 1, self.gridParams.nfiltercol-1 ) self.grid.addWidget( self.binGroup, self.gridParams.optrow0+1+rowoffset, self.gridParams.optcol0+self.gridParams.nfiltercol*(1), 1, self.gridParams.nfiltercol ) def initTimeBin(self): """Create text entry and combo box telling us how to bin observations in time.""" # Label to hold grid information set_label = QtGui.QLabel('Timestep: ') set_label.setAlignment(QtCore.Qt.AlignCenter) # Group to hold the time unit drop down list self.timeGroup = QtGui.QComboBox() # Label to the left of text entry and drop down lists self.timeText = QtGui.QLineEdit('7') # Drop down list options button_names = ['Hours', 'Days'] buttonlist = [] # Add each name to the drop down list self.timeGroup.addItems(button_names) self.timeGroup.setCurrentIndex(button_names.index('Days')) # Upon item selection, call the method updateplot self.connect(self.timeGroup, QtCore.SIGNAL('activated(QString)'), self.updateplot) # Place widgets on grid rowoffset = self.gridParams.timegroup_row0 self.grid.addWidget( set_label, self.gridParams.optrow0+1+rowoffset, self.gridParams.optcol0+self.gridParams.nfiltercol*(2)+1, 1, 1 ) self.grid.addWidget( self.timeText, self.gridParams.optrow0+1+rowoffset, self.gridParams.optcol0+self.gridParams.nfiltercol*(2)+2, 1, self.gridParams.nfiltercol-2 ) self.grid.addWidget( self.timeGroup, self.gridParams.optrow0+1+rowoffset, self.gridParams.optcol0+self.gridParams.nfiltercol*(3), 1, self.gridParams.nfiltercol-1 ) def initDivisions(self): """Initialize widgets to divide each bar in the bar plot""" # Lots of radio buttons that determine if we should divide the # bars in the bar plot by different categories # (time of day, weather, day of week, type of customer, etc. 
# Group to hold the radio buttons self.divisionGroup = QtGui.QButtonGroup() # Label above the Division Options label_timeseries = QtGui.QLabel('Bar Division Options') label_timeseries.setAlignment(QtCore.Qt.AlignCenter) thisfont = label_timeseries.font() thisfont.setPointSize(16) label_timeseries.setFont(thisfont) # Radio Buttons button_names = ['None', 'Customer Type', 'Day of Week', 'Hour of Day', 'Region', 'Other'] buttonlist = [] # Add buttons to the group, no default checked counter = 0 for button in button_names: thisbutton = QtGui.QRadioButton(button) thisbutton.setObjectName(button) buttonlist.append(thisbutton) buttonlist[counter].clicked.connect(self.updateplot) self.divisionGroup.addButton(thisbutton) self.divisionGroup.setId(thisbutton, counter) counter += 1 # Make the group exclusive self.divisionGroup.setExclusive(True) # Set Default buttonlist[0].setChecked(True) # Place widgets on grid rowoffset = self.gridParams.divisiongroup_row0 self.grid.addWidget( label_timeseries, self.gridParams.optrow0+0+rowoffset, self.gridParams.optcol0, 1, self.gridParams.optncol, ) ijs = [value for value in itertools.product([0,1,2],repeat=2) if value[0]<2] for index in range(len(buttonlist)): self.grid.addWidget( buttonlist[index], self.gridParams.optrow0+ijs[index][0]+1+rowoffset, self.gridParams.optcol0+self.gridParams.nfiltercol*(ijs[index][1]+1), 1, self.gridParams.nfiltercol ) def initOverplots(self): """Initialize widgets to control the items to plot on top of the bar plot""" label_overplot = QtGui.QLabel('Overplot Options') label_overplot.setAlignment(QtCore.Qt.AlignCenter) thisfont = label_overplot.font() thisfont.setPointSize(16) label_overplot.setFont(thisfont) # Place label on widget grid rowoffset = self.gridParams.overgroup_row0 self.grid.addWidget( label_overplot, self.gridParams.optrow0+rowoffset, self.gridParams.optcol0, 1, self.gridParams.optncol ) # ---- Group to hold overplot check boxes and place on grid self.overGroup = QtGui.QButtonGroup() self.overGroup.setExclusive(False) types = ['Temperature (Min)', 'Temperature (Mean)', 'Temperature (Max)', 'Precipitation', 'Wind Speed (Mean)', 'Wind Speed (Max)'] buttonlist = [] counter = 0 ijs = [val for val in itertools.product(range(3),repeat=2) if val[0]<=1] for name,ij in zip(types,ijs): thisbutton = QtGui.QCheckBox(name,self) thisbutton.setObjectName(name) buttonlist.append(thisbutton) #buttonlist[counter].clicked.connect(self.updateplot) buttonlist[counter].setChecked(False) self.overGroup.addButton(thisbutton) self.overGroup.setId(thisbutton, counter) self.grid.addWidget(buttonlist[counter], self.gridParams.optrow0+rowoffset+1+ij[0], self.gridParams.optcol0+ij[1]*4+1, 1, 4) counter += 1 def initFilters(self): """Initialize widgets to filter the items in the time series""" # Label for filters label_timeseries = QtGui.QLabel('Filtering Options') label_timeseries.setAlignment(QtCore.Qt.AlignCenter) thisfont = label_timeseries.font() thisfont.setPointSize(16) label_timeseries.setFont(thisfont) # Place label widget on grid rowoffset = self.gridParams.filtergroup_row0 self.grid.addWidget( label_timeseries, self.gridParams.optrow0+rowoffset, self.gridParams.optcol0, 1, self.gridParams.optncol ) # ---- Group to hold Customer Type check boxes and place on grid label_customers = QtGui.QLabel('Customer Type') label_customers.setAlignment(QtCore.Qt.AlignCenter) colwidth = 3 thisrowoffset = 1 thiscoloffset = 1 self.grid.addWidget(label_customers, self.gridParams.optrow0+rowoffset+thisrowoffset, self.gridParams.optcol0+thiscoloffset, 1, 
colwidth) self.filterGroup_customer = QtGui.QButtonGroup() self.filterGroup_customer.setExclusive(False) self.filterGroup_customer.setObjectName('Customer Type') types = ['Subscriber', 'Customer'] buttonlist = [] counter = 0 ijs = [[0,0],[1,0]] for name,ij in zip(types,ijs): thisbutton = QtGui.QCheckBox(name,self) thisbutton.setObjectName(name) buttonlist.append(thisbutton) #buttonlist[counter].clicked.connect(self.updateplot) buttonlist[counter].setChecked(True) self.filterGroup_customer.addButton(thisbutton) self.filterGroup_customer.setId(thisbutton, counter) self.grid.addWidget(buttonlist[counter], self.gridParams.optrow0+rowoffset+thisrowoffset+1+ij[0], self.gridParams.optcol0+ij[1]+thiscoloffset, 1, colwidth) counter += 1 # --- Group to hold Region check boxes label_region = QtGui.QLabel('Region') label_region.setAlignment(QtCore.Qt.AlignCenter) colwidth = 3 thisrowoffset = 5 thiscoloffset = 1 self.grid.addWidget(label_region, self.gridParams.optrow0+rowoffset+thisrowoffset, self.gridParams.optcol0+thiscoloffset, 1, colwidth) self.filterGroup_region = QtGui.QButtonGroup() self.filterGroup_region.setExclusive(False) self.filterGroup_region.setObjectName('Region') types = ['San Francisco', 'San Jose', 'Mountain View', 'Redwood City', 'Palo Alto'] buttonlist = [] counter = 0 ijs = [[val,0] for val in range(len(types))] for name,ij in zip(types,ijs): thisbutton = QtGui.QCheckBox(name,self) thisbutton.setObjectName(name) buttonlist.append(thisbutton) #buttonlist[counter].clicked.connect(self.updateplot) buttonlist[counter].setChecked(True) self.filterGroup_region.addButton(thisbutton) self.filterGroup_region.setId(thisbutton, counter) self.grid.addWidget(buttonlist[counter], self.gridParams.optrow0+rowoffset+thisrowoffset+1+ij[0], self.gridParams.optcol0+ij[1]+thiscoloffset, 1, colwidth) counter += 1 # ---- Group to hold Day of Week check boxes label_dayofweek = QtGui.QLabel('Day of Week') label_dayofweek.setAlignment(QtCore.Qt.AlignCenter) colwidth = 3 thiscoloffset = 5 thisrowoffset = 1 self.grid.addWidget(label_dayofweek, self.gridParams.optrow0+rowoffset+thisrowoffset, self.gridParams.optcol0+thiscoloffset, 1, colwidth) self.filterGroup_dayofweek = QtGui.QButtonGroup() self.filterGroup_dayofweek.setExclusive(False) self.filterGroup_dayofweek.setObjectName('Day of Week') types = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'] buttonlist = [] counter = 0 ijs = [[val,0] for val in range(len(types))] for name,ij in zip(types,ijs): thisbutton = QtGui.QCheckBox(name,self) thisbutton.setObjectName(name) buttonlist.append(thisbutton) #buttonlist[counter].clicked.connect(self.updateplot) buttonlist[counter].setChecked(True) self.filterGroup_dayofweek.addButton(thisbutton) self.filterGroup_dayofweek.setId(thisbutton, counter) self.grid.addWidget(buttonlist[counter], self.gridParams.optrow0+rowoffset+thisrowoffset+1+ij[0], self.gridParams.optcol0+ij[1]+thiscoloffset, 1, colwidth) counter += 1 # ---- Group to hold Hour of Day check boxes label_hourofday = QtGui.QLabel('Hour of Day') label_hourofday.setAlignment(QtCore.Qt.AlignCenter) colwidth = 1 thiscoloffset = 9 thisrowoffset = 1 self.grid.addWidget(label_hourofday, self.gridParams.optrow0+rowoffset+thisrowoffset, self.gridParams.optcol0+thiscoloffset, 1, colwidth*3) self.filterGroup_hourofday = QtGui.QButtonGroup() self.filterGroup_hourofday.setExclusive(False) self.filterGroup_hourofday.setObjectName('Hour of Day') types = [str(val) for val in range(24)] buttonlist = [] counter = 0 ijs = [val for val in 
itertools.product(range(8),repeat=2) if val[1]<=2] for name,ij in zip(types,ijs): thisbutton = QtGui.QCheckBox(name,self) thisbutton.setObjectName(name) buttonlist.append(thisbutton) #buttonlist[counter].clicked.connect(self.updateplot) buttonlist[counter].setChecked(True) self.filterGroup_hourofday.addButton(thisbutton) self.filterGroup_hourofday.setId(thisbutton, counter) self.grid.addWidget(buttonlist[counter], self.gridParams.optrow0+rowoffset+thisrowoffset+1+ij[0], self.gridParams.optcol0+ij[1]+thiscoloffset, 1, colwidth) counter += 1 # ---- Group to hold date range calendar objects def initButtons(self): """Initialize Refresh and Quit buttons at the bottom of the window""" # Button to refresh plot after entering text self.buttonRefresh = QtGui.QPushButton('Refresh Plot',self) self.buttonRefresh.clicked.connect(self.updateplot) # Button to reset plot to default values self.buttonReset = QtGui.QPushButton('Reset All',self) self.buttonReset.clicked.connect(self.resetplot) # Button to close window self.buttonQuit = QtGui.QPushButton('Quit Window',self) self.buttonQuit.clicked.connect(QtCore.QCoreApplication.instance().quit) # Place buttons on the grid self.grid.addWidget(self.buttonRefresh, self.gridParams.nrow-1,self.gridParams.ncol-3*self.gridParams.nfiltercol, 1, self.gridParams.nfiltercol-1) self.grid.addWidget(self.buttonReset, self.gridParams.nrow-1,self.gridParams.ncol-2*self.gridParams.nfiltercol, 1, self.gridParams.nfiltercol-1) self.grid.addWidget(self.buttonQuit, self.gridParams.nrow-1,self.gridParams.ncol-1*self.gridParams.nfiltercol, 1, self.gridParams.nfiltercol-1) def resetplot(self,state): """ Reset PlotOptions to default values and make the original plot. """ # Get Initial plot options and redraw plot self.initOptions() # Options for selecting what to plot self.initPlot() # After initializing the options def updateplot(self,state): """Set up plot options based on which buttons are checked. Then, call the plottng function (plotbar) with the new options""" # Initialize instance of plot options class. # These options will replace the existing options. newoptions = BabsClasses.PlotOptions() # Get new options from the current widget selections newoptions.populate(self) # Disable some widgets based on currently selected widgets self.DisableOptions(newoptions) # Call plotting routine, passing the newly constructed instance # of the PlotOptions class. self.clearplot() self.plotbar(newoptions) def clearplot(self): """ Clear secondary axis from plot before plotting again.""" if hasattr(self,'ax2'): for ii in range(len(self.ax2.lines)): self.ax2.lines[0].remove() self.overplotlegend.remove() self.ax2.set_ylabel('') self.ax2.set_yticklabels(['']*5) self.canvas.draw() def plotbar(self,NewOptions): """Plots the bar plot. INPUT self.options - Options class object that determines what to plot. See class PlotOptions? for more information.""" # Self.PlotOptions holds option information of old plot # newoptions holds the option information of the new plot # OUTLINE # 1. Gather data based on plot options # 2. Make the plot # 3. Replace self.PlotOptions with NewOptions # Get data to plot on the bar plot. 
# Main Type: Timeseries if NewOptions.typeid==0: # Main Group: Number of Rides if NewOptions.barid==0: basedata = BabsFunctions.getdata('trip',NewOptions) # Create Pandas data frame to hold all information tempdf = pd.DataFrame( basedata['Trip ID'].resample( NewOptions.dT, how='count' ).fillna(0) ) tempdf.columns = ['Number of Rides'] # Calculate values for each sub-division of the data set if NewOptions.division!=[]: types = NewOptions.division_types for ii in range(len(types)): # Make column of zeros for each bicycle ride column = pd.DataFrame( np.zeros(len(basedata)), index=basedata.index ) # Place a 1 in each row associated with this particular type if NewOptions.division=='Customer Type': column.loc[basedata['Subscription Type']==types[ii]] = 1 elif NewOptions.division=='Day of Week': column.loc[basedata.index.dayofweek==ii] = 1 elif NewOptions.division=='Hour of Day': column.loc[basedata.index.hour==ii] = 1 elif NewOptions.division=='Region': column.loc[basedata['region']==types[ii]] = 1 # Count the number of ones in each part of the timeseries column = column.resample( NewOptions.dT, how=np.sum ).fillna(0) if not column.empty: tempdf[types[ii]] = column # Drop original item in the pandas dataframe if NewOptions.division!='None': tempdf.drop('Number of Rides',axis=1,inplace=True) # Main Group: Duration of rides if NewOptions.barid==1: basedata = BabsFunctions.getdata('trip',NewOptions) tempdf = basedata[['Duration']] tempdf = tempdf.resample( NewOptions.dT, how=np.median ).fillna(0) tempdf.columns = ['Duration of Ride'] # Calculate values for each sub-division of the data set if NewOptions.division=='Customer Type': barcolors = [] # Add Number of Subscribers to the data frame types = ['Subscriber','Customer'] for ii in range(len(types)): column = basedata[['Duration']] column.loc[basedata['Subscription Type']==types[ii]] = 1 column = column.resample( NewOptions.dT, how=np.median ).fillna(0) tempdf[types[ii]] = column[0] # Drop total Number from the Data Frame tempdf.drop('Duration of Rides',axis=1,inplace=True) #elif NewOptions.division=='Another Division': # do similar things # Main Type: Histogram elif NewOptions.typeid==1: # Number of Rides if NewOptions.barid==0: # Get column of number of rides in the basedata basedata = BabsFunctions.getdata('trip', NewOptions) # Group the basedata into divisions indicated by the bin ID if NewOptions.binid==1: tempdf = basedata.resample( NewOptions.dT, how='count' ).fillna(0)['Trip ID'] count, divisions = np.histogram(tempdf, bins=20) # Put histogram into dataframe tempdf = pd.DataFrame(count, index=divisions[:-1]+(divisions[1]-divisions[0])/2.) 
if NewOptions.binid==2: tempdf = pd.DataFrame(basedata['Trip ID'].groupby(basedata.index.dayofweek).count()) if NewOptions.binid==3: tempdf = pd.DataFrame(basedata['Trip ID'].groupby(basedata.index.hour).count()) if NewOptions.binid==4: tempdf = pd.DataFrame(basedata['Trip ID'].groupby(basedata['region']).count()) tempdf.index.name = 'Number of Rides' tempdf.columns = ['Number of Rides'] # Divide bars for plotting, if indicated if NewOptions.division!='None': types = NewOptions.division_types for ii in range(len(types)): column = BabsFunctions.get_column(basedata,ii,NewOptions) if NewOptions.binid==1: column = pd.DataFrame( column.resample( NewOptions.dT, how='count' ).fillna(0), columns=['ones'] ) thistypefrac = BabsFunctions.typefraction(column,divisions) elif NewOptions.binid==2: thistypefrac = column.groupby(column.index.dayofweek).count() elif NewOptions.binid==3: thistypefrac = column.groupby(column.index.hourofday).count() elif NewOptions.binid==4: thistypefrac = column.groupby(column.index).count() # Add the value for this type to the dataframe if not column.empty: tempdf[types[ii]] = thistypefrac # Drop original item in the pandas dataframe if NewOptions.division!='None': tempdf.drop('Number of Rides',axis=1,inplace=True) # Create the barplot # Calculate width of bars # Regularly spaced time series if NewOptions.typeid==0: width = 1.0*(tempdf.index[1]-tempdf.index[0]).days # Histogram else: diffs = [tempdf.index[ind+1] - tempdf.index[ind] for ind in range(len(tempdf.index)-1)] width = 1.0*(min(diffs)) #width = 1.0*(tempdf.index[1]-tempdf.index[0]) # Calculate barplot colors types = NewOptions.division_types if NewOptions.division=='None': barcolors = ['b'] elif NewOptions.division=='Customer Type': barcolors = cm.rainbow( np.linspace(0,1,len(types)) ) elif NewOptions.division=='Day of Week': barcolors = cm.rainbow( np.linspace(0,1,len(types)) ) elif NewOptions.division=='Hour of Day': barcolors = cm.hsv( np.linspace(0,1,len(types)) ) elif NewOptions.division=='Region': barcolors = cm.jet( np.linspace(0,1,len(types)) ) # Create the bars on the bar plot bars = [] # list of handles for each bar in barplot for ii in range(len(tempdf.columns)): # Save previous plot's y-value for base of next plot if ii==0: previous = np.zeros(len(tempdf.iloc[:,ii])) # Add the bar to the axis. If plotting many (divisions), make sure # to set ax.hold(True) after the first call if len(tempdf.columns)==1: colorindex = 0 else: colorindex = types.index(tempdf.columns[ii]) thisbar = self.ax.bar( tempdf.index, tempdf.iloc[:,ii], width, bottom=previous, color=barcolors[ii] ) bars.append(thisbar) self.ax.hold(True) previous = previous + tempdf.iloc[:,ii] # Update all other lines to overplot if NewOptions.overtype!=[]: # Initialize the new plot self.ax2 = self.ax.twinx() self.ax2.hold(True) # Get data. 
overdata = BabsFunctions.getdata('weather', NewOptions) overlines = [] # Add each new plot to the axis for name in NewOptions.overtype: # Match names in NewOptions.overtype to column names in overdata if name=='Temperature (Min)': column='Min_TemperatureF' color='#00bfff' if name=='Temperature (Mean)': column='Mean_Temperature_F' color='#3cb371' if name=='Temperature (Max)': column='Max_Temperature_F' color='#ff0000' if name=='Precipitation': column='Precipitation_In ' color='#888888' if name=='Wind Speed (Mean)': column='Mean_Wind_Speed_MPH ' color='#da70d6' if name=='Wind Speed (Max)': column='Max_Gust_Speed_MPH' color='#9400d3' # Average weather data into bins corresponding to the x-axis of the main plot thisdata = BabsFunctions.bin_weather(overdata[column], tempdf, NewOptions ) if NewOptions.typeid==0: thisdata.index = thisdata.index + (thisdata.index[1]-thisdata.index[0])//2 else: thisdata.index = thisdata.index + 0.5*(thisdata.index[1]-thisdata.index[0]) # Make a line to show this data if name[0:11]=='Temperature': baserange = 50. basemin = 25 basemax = 95 overlines.append( self.ax2.plot(thisdata.index, thisdata,color=color,lw=4, label=name)) self.ax2.set_ylim([basemin,basemax]) else: # If we have already plotted something, scale this plot to a range if overlines!=[]: axlimit = self.ax2.get_ylim() thisrange = thisdata.max()-thisdata.min() thisdata = thisdata-thisdata.min() thisdata = thisdata*(baserange/thisrange) thisdata = thisdata + basemin # Otherwise don't scale the information else: baserange = thisdata.max()-thisdata.min() basemin = thisdata.min()-0.2*baserange basemax = thisdata.max()+0.2*baserange self.ax2.set_ylim([basemin,basemax]) # Plot the non-temperature information on the plot overlines.append( self.ax2.plot(thisdata.index, thisdata,color=color,lw=4, label=name)) # Create legend for this second axis #ooga=booga self.ax2.hold(False) self.overplotlegend = self.ax2.legend(loc=2)# (line for line in overlines), #(col for col in overdata) ) #self.overplotlegend.draggable() # Reset ax.hold(False) after placing everything on the plot self.ax.hold(False) # Make a legend for the figure self.plotlegend = self.ax.legend( (bar[0] for bar in bars), (col for col in tempdf.columns) ) self.plotlegend.draggable() # Add titles and labels # Plot title and axis labels self.setLabels(NewOptions) # Refresh the canvas self.canvas.draw() # Resent plot options with the new options self.PlotOptions = NewOptions def main(): app = QtGui.QApplication(sys.argv) ex = MainWindow() sys.exit(app.exec_()) if __name__ == '__main__': main()
"""Provides classes used to construct a full ``Choices`` instance. Notes ----- The documentation format in this file is numpydoc_. .. _numpydoc: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt """ from __future__ import unicode_literals try: from collections.abc import Mapping except ImportError: from collections import Mapping from django.utils.functional import Promise class ChoiceAttributeMixin(object): """Base class to represent an attribute of a ``ChoiceEntry``. Used for ``constant``, ``name``, and ``display``. It must be used as a mixin with another type, and the final class will be a type with added attributes to access the ``ChoiceEntry`` instance and its attributes. Attributes ---------- choice_entry : instance of ``ChoiceEntry`` The ``ChoiceEntry`` instance that hold the current value, used to access its constant, value and display name. constant : property Returns the choice field holding the constant of the attached ``ChoiceEntry``. value : property Returns the choice field holding the value of the attached ``ChoiceEntry``. display : property Returns the choice field holding the display name of the attached ``ChoiceEntry``. original_value : ? The value used to create the current instance. creator_type : type The class that created a new class. Will be ``ChoiceAttributeMixin`` except if it was overridden by the author. Example ------- Classes can be created manually: >>> class IntChoiceAttribute(ChoiceAttributeMixin, int): pass >>> field = IntChoiceAttribute(1, ChoiceEntry(('FOO', 1, 'foo'))) >>> field 1 >>> field.constant, field.value, field.display ('FOO', 1, 'foo') >>> field.choice_entry ('FOO', 1, 'foo') Or via the ``get_class_for_value`` class method: >>> klass = ChoiceAttributeMixin.get_class_for_value(1.5) >>> klass.__name__ 'FloatChoiceAttribute' >>> float in klass.mro() True """ def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument """Construct the object (the other class used with this mixin). Notes ----- Only passes the very first argument to the ``super`` constructor. All others are not needed for the other class, only for this mixin. """ if issubclass(cls, Promise): # Special case to manage lazy django stuff like ugettext_lazy return super(ChoiceAttributeMixin, cls).__new__(cls) return super(ChoiceAttributeMixin, cls).__new__(cls, *args[:1]) def __init__(self, value, choice_entry): """Initiate the object to save the value and the choice entry. Parameters ---------- value : ? Value to pass to the ``super`` constructor (for the other class using this mixin) choice_entry: ChoiceEntry The ``ChoiceEntry`` instance that hold the current value, used to access its constant, value and display name. Notes ----- Call the ``super`` constructor with only the first value, as the other class doesn't expect the ``choice_entry`` parameter. 
""" if isinstance(self, Promise): # Special case to manage lazy django stuff like ugettext_lazy # pylint: disable=protected-access super(ChoiceAttributeMixin, self).__init__(value._proxy____args, value._proxy____kw) else: super(ChoiceAttributeMixin, self).__init__() self.original_value = value self.choice_entry = choice_entry if self.choice_entry.attributes: for key, value in self.choice_entry.attributes.items(): setattr(self, key, value) @property def constant(self): """Property that returns the ``constant`` attribute of the attached ``ChoiceEntry``.""" return self.choice_entry.constant @property def value(self): """Property that returns the ``value`` attribute of the attached ``ChoiceEntry``.""" return self.choice_entry.value @property def display(self): """Property that returns the ``display`` attribute of the attached ``ChoiceEntry``.""" return self.choice_entry.display @classmethod def get_class_for_value(cls, value): """Class method to construct a class based on this mixin and the type of the given value. Parameters ---------- value: ? The value from which to extract the type to create the new class. Notes ----- The create classes are cached (in ``cls.__classes_by_type``) to avoid recreating already created classes. """ type_ = value.__class__ # Check if the type is already a ``ChoiceAttribute`` if issubclass(type_, ChoiceAttributeMixin): # In this case we can return this type return type_ # Create a new class only if it wasn't already created for this type. if type_ not in cls._classes_by_type: # Compute the name of the class with the name of the type. class_name = str('%sChoiceAttribute' % type_.__name__.capitalize()) # Create a new class and save it in the cache. cls._classes_by_type[type_] = type(class_name, (cls, type_), { 'creator_type': cls, }) # Return the class from the cache based on the type. return cls._classes_by_type[type_] def __reduce__(self): """Reducer to make the auto-created classes picklable. Returns ------- tuple A tuple as expected by pickle, to recreate the object when calling ``pickle.loads``: 1. a callable to recreate the object 2. a tuple with all positioned arguments expected by this callable """ return ( # Function to create a choice attribute create_choice_attribute, ( # The class that created the class of the current value self.creator_type, # The original type of the current value self.original_value, # The tied `choice_entry` self.choice_entry ) ) def __bool__(self): """Use the original value to know if the value is truthy of falsy""" return bool(self.original_value) _classes_by_type = {} def create_choice_attribute(creator_type, value, choice_entry): """Create an instance of a subclass of ChoiceAttributeMixin for the given value. Parameters ---------- creator_type : type ``ChoiceAttributeMixin`` or a subclass, from which we'll call the ``get_class_for_value`` class-method. value : ? The value for which we want to create an instance of a new subclass of ``creator_type``. choice_entry: ChoiceEntry The ``ChoiceEntry`` instance that hold the current value, used to access its constant, value and display name. Returns ------- ChoiceAttributeMixin An instance of a subclass of ``creator_type`` for the given value """ klass = creator_type.get_class_for_value(value) return klass(value, choice_entry) class ChoiceEntry(tuple): """Represents a choice in a ``Choices`` object, with easy access to its attribute. Expecting a tuple with three entries. (constant, value, display name), it will add three attributes to access then: ``constant``, ``value`` and ``display``. 
By passing a dict after these three first entries, in the tuple, it's also possible to add some other attributes to the ``ChoiceEntry` instance``. Parameters ---------- tuple_ : tuple A tuple with three entries, the name of the constant, the value, and the display name. A dict could be added as a fourth entry to add additional attributes. Example ------- >>> entry = ChoiceEntry(('FOO', 1, 'foo')) >>> entry ('FOO', 1, 'foo') >>> (entry.constant, entry.value, entry.display) ('FOO', 1, 'foo') >>> entry.choice (1, 'foo') You can also pass attributes to add to the instance to create: >>> entry = ChoiceEntry(('FOO', 1, 'foo', {'bar': 1, 'baz': 2})) >>> entry ('FOO', 1, 'foo') >>> entry.bar 1 >>> entry.baz 2 Raises ------ AssertionError If the number of entries in the tuple is not expected. Must be 3 or 4. """ # Allow to easily change the mixin to use in subclasses. ChoiceAttributeMixin = ChoiceAttributeMixin def __new__(cls, tuple_): """Construct the tuple with 3 entries, and save optional attributes from the 4th one.""" # Ensure we have exactly 3 entries in the tuple and an optional dict. assert 3 <= len(tuple_) <= 4, 'Invalid number of entries in %s' % (tuple_,) attributes = None if len(tuple_) == 4: attributes = tuple_[3] assert attributes is None or isinstance(attributes, Mapping), 'Last argument must be a dict-like object in %s' % (tuple_,) if attributes: for invalid_key in {'constant', 'value', 'display'}: assert invalid_key not in attributes, 'Additional attributes cannot contain one named "%s" in %s' % (invalid_key, tuple_,) # Call the ``tuple`` constructor with only the real tuple entries. obj = super(ChoiceEntry, cls).__new__(cls, tuple_[:3]) # Save all special attributes. # pylint: disable=protected-access obj.attributes = attributes obj.constant = obj._get_choice_attribute(tuple_[0]) obj.value = obj._get_choice_attribute(tuple_[1]) obj.display = obj._get_choice_attribute(tuple_[2]) # Add an attribute holding values as expected by django. obj.choice = (obj.value, obj.display) # Add additional attributes. if attributes: for key, value in attributes.items(): setattr(obj, key, value) return obj def _get_choice_attribute(self, value): """Get a choice attribute for the given value. Parameters ---------- value: ? The value for which we want a choice attribute. Returns ------- An instance of a class based on ``ChoiceAttributeMixin`` for the given value. Raises ------ ValueError If the value is None, as we cannot really subclass NoneType. """ if value is None: raise ValueError('Using `None` in a `Choices` object is not supported. You may ' 'use an empty string.') return create_choice_attribute(self.ChoiceAttributeMixin, value, self) def __reduce__(self): """Reducer to pass attributes when pickling. Returns ------- tuple A tuple as expected by pickle, to recreate the object when calling ``pickle.loads``: 1. a callable to recreate the object 2. a tuple with all positioned arguments expected by this callable """ return ( # The ``ChoiceEntry`` class, or a subclass, used to create the current instance self.__class__, # The original values of the tuple, and attributes (we pass a tuple as single argument) ( ( self.constant.original_value, self.value.original_value, self.display.original_value, self.attributes ), ) )
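
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module): how the classes above fit
# together when used directly. The values are taken from the doctest examples
# in the docstrings; the __main__ guard keeps the demo out of library imports.

if __name__ == '__main__':
    entry = ChoiceEntry(('FOO', 1, 'foo', {'bar': 1, 'baz': 2}))

    # The three tuple members are exposed as rich choice attributes...
    assert (entry.constant, entry.value, entry.display) == ('FOO', 1, 'foo')
    # ...and each attribute can reach the other two through the shared entry.
    assert entry.value.constant == 'FOO'
    assert entry.display.value == 1

    # Extra attributes from the optional dict land on the entry itself.
    assert entry.bar == 1 and entry.baz == 2

    # ``choice`` is the (value, display) pair expected by Django model fields.
    assert entry.choice == (1, 'foo')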
#! /bin/env python2
# coding: utf-8

import os

import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import random as rd
from pickle import dump, load


class Data:
    def __init__(self):
        self.m_list1 = []
        self.m_list2 = []


N = 100
M = 100
MAX = N + M + 1
MAX_EDGE = 380
MAX_DEG = 450
ITERATIONS = 50000
S1 = 0.
T1 = 1.
S2 = 0.
T2 = 1.
beta = 0.5
NUMGRAPH = 10
NSIM = 10
NAME = "lucasmedeiros"

# initial fraction of cooperators
p1, p2 = .5, .5

# number of cooperators
cc1, cc2 = 0, 0

# fraction of cooperators
r1, r2 = np.zeros(ITERATIONS + 1, dtype=np.float), np.zeros(ITERATIONS + 1, dtype=np.float)

payoff = np.array([[1, S1], [T1, 0]], dtype=np.float, ndmin=2)
payoff2 = np.array([[1, S2], [T2, 0]], dtype=np.float, ndmin=2)


def interaction(x, y):
    """Payoff received by node x when playing against node y."""
    if x < N:
        return payoff[g.node[x]['strategy']][g.node[y]['strategy']]
    else:
        return payoff2[g.node[x]['strategy']][g.node[y]['strategy']]


def change_prob(x, y):
    """Fermi imitation probability of adopting the strategy of a fitter node."""
    return 1. / (1 + np.exp(-beta * (y - x)))


def complete():
    return nx.complete_bipartite_graph(N, M)


def random():
    """Random bipartite graph with MAX_EDGE edges between the two partitions."""
    g = nx.Graph()
    g.add_nodes_from(np.arange(0, N + M, 1, dtype=np.int))
    while g.number_of_edges() < MAX_EDGE:
        a, b = rd.randint(0, N - 1), rd.randint(N, N + M - 1)
        if b not in g[a]:
            g.add_edge(a, b)
    return g


def set_initial_strategy(g):
    global cc1, cc2
    coop = range(0, int(p1 * N), 1) + range(N, int(p2 * M) + N, 1)
    cc1 = int(p1 * N)
    defect = set(range(0, N + M, 1)) - set(coop)
    cc2 = int(p2 * M)
    coop = dict(zip(coop, len(coop) * [0]))
    defect = dict(zip(defect, len(defect) * [1]))
    nx.set_node_attributes(g, 'strategy', coop)
    nx.set_node_attributes(g, 'strategy', defect)


def fitness(x):
    ret = 0
    for i in g.neighbors(x):
        ret += interaction(x, i)
    return ret


def simulate():
    global cc1, cc2
    it = 0
    while it < ITERATIONS:
        it += 1
        if it % 2:
            a = rd.randint(0, N - 1)
        else:
            a = rd.randint(N, N + M - 1)
        if len(g.neighbors(a)) == 0:
            it -= 1
            continue
        # Pick a random neighbour of a neighbour, i.e. a node in the same
        # partition as a.
        b = g.neighbors(a)[rd.randint(0, len(g.neighbors(a)) - 1)]
        b = g.neighbors(b)[rd.randint(0, len(g.neighbors(b)) - 1)]
        if a == b:
            it -= 1
            continue
        assert (a < N and b < N) or (a >= N and b >= N)
        if g.node[a]['strategy'] != g.node[b]['strategy']:
            fa, fb = fitness(a), fitness(b)
            l = np.random.random()
            p = change_prob(fa, fb)
            if l <= p:
                if a < N:
                    if g.node[a]['strategy'] == 0:
                        cc1 -= 1
                    else:
                        cc1 += 1
                else:
                    if g.node[a]['strategy'] == 0:
                        cc2 -= 1
                    else:
                        cc2 += 1
                nx.set_node_attributes(g, 'strategy', {a: g.node[b]['strategy']})
        r1[it] = float(cc1) / N
        r2[it] = float(cc2) / M


nbins = 10
T1range = np.linspace(1, 2, 3)
S1range = np.linspace(-1, 0, 3)
T2range = np.linspace(1, 2, nbins)
S2range = np.linspace(-1, 0, nbins)
mag1 = np.zeros((nbins, nbins), dtype=np.float)
mag2 = np.zeros((nbins, nbins), dtype=np.float)

for G in xrange(NUMGRAPH):
    g = random()
    data = Data()
    for S1 in S1range:
        for T1 in T1range:
            global payoff, payoff2
            mag1 = np.zeros((nbins, nbins), dtype=np.float)
            mag2 = np.zeros((nbins, nbins), dtype=np.float)
            i = 0
            payoff = np.array([[1, S1], [T1, 0]], dtype=np.float, ndmin=2)
            for S2 in S2range:
                j = 0
                for T2 in T2range:
                    payoff2 = np.array([[1, S2], [T2, 0]], dtype=np.float, ndmin=2)
                    for SS in xrange(NSIM):
                        set_initial_strategy(g)
                        simulate()
                        # Accumulate over the NSIM runs (averaged below), as in
                        # the complete and scale-free sections.
                        mag1[i][j] += np.mean(r1[-1000:])
                        mag2[i][j] += np.mean(r2[-1000:])
                    j += 1
                i += 1
            mag1 /= NSIM
            mag2 /= NSIM
            data.m_list1.append((S1, T1, S2, T2, mag1))
            data.m_list2.append((S1, T1, S2, T2, mag2))
    f = open('random graph {1} {0}.grph'.format(G, NAME), 'w')
    dump(data, f, 2)
    f.close()
    print("Finished Random Graph {0}".format(G))

g = complete()
data = Data()
mag1 = np.zeros((nbins, nbins), dtype=np.float)
mag2 = np.zeros((nbins, nbins), dtype=np.float)
for S1 in S1range:
    for T1 in T1range:
        global payoff, payoff2
        i = 0
        payoff = np.array([[1, S1], [T1, 0]], dtype=np.float, ndmin=2)
        for S2 in S2range:
            j = 0
            for T2 in T2range:
                payoff2 = np.array([[1, S2], [T2, 0]], dtype=np.float, ndmin=2)
                for SS in xrange(NSIM):
                    set_initial_strategy(g)
                    simulate()
                    mag1[i][j] += np.mean(r1[-1000:])
                    mag2[i][j] += np.mean(r2[-1000:])
                j += 1
            i += 1
        mag1 /= NSIM
        mag2 /= NSIM
        data.m_list1.append((S1, T1, S2, T2, mag1))
        data.m_list2.append((S1, T1, S2, T2, mag2))
f = open('complete graph {1} {0}.grph'.format(G, NAME), 'w')
dump(data, f, 2)
f.close()
print("Finished Complete Graph")

p = './graphs/'
sc_graphs = []
for _, _, c in os.walk(p):
    for a, x in enumerate(c):
        pp = os.path.join(p, x)
        f = open(pp, 'r')
        g = load(f)
        sc_graphs.append(g)

# Each loaded graph gets an index, used to name its output file.
for G, g in enumerate(sc_graphs):
    data = Data()
    mag1 = np.zeros((nbins, nbins), dtype=np.float)
    mag2 = np.zeros((nbins, nbins), dtype=np.float)
    for S1 in S1range:
        for T1 in T1range:
            global payoff, payoff2
            i = 0
            payoff = np.array([[1, S1], [T1, 0]], dtype=np.float, ndmin=2)
            for S2 in S2range:
                j = 0
                for T2 in T2range:
                    payoff2 = np.array([[1, S2], [T2, 0]], dtype=np.float, ndmin=2)
                    for SS in xrange(NSIM):
                        set_initial_strategy(g)
                        simulate()
                        mag1[i][j] += np.mean(r1[-1000:])
                        mag2[i][j] += np.mean(r2[-1000:])
                    j += 1
                i += 1
            mag1 /= NSIM
            mag2 /= NSIM
            data.m_list1.append((S1, T1, S2, T2, mag1))
            data.m_list2.append((S1, T1, S2, T2, mag2))
    f = open('scalefree graph {1} {0}.grph'.format(G, NAME), 'w')
    dump(data, f, 2)
    f.close()
    print("Finished Graph {0}".format(G))
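
# ---------------------------------------------------------------------------
# Companion sketch (not part of the original script): one way the dumped
# ``.grph`` result files could be read back for plotting. File names follow
# the '<kind> graph <NAME> <index>.grph' pattern used above; the function name
# and the commented plotting call are illustrative only.

def load_results(path):
    """Return the pickled Data object written by the sweeps above (sketch)."""
    with open(path, 'rb') as fh:
        return load(fh)

# Example usage (assumes the file exists and Data is importable/unpicklable):
#     results = load_results('random graph {0} 0.grph'.format(NAME))
#     S1, T1, S2, T2, mag = results.m_list1[0]
#     plt.imshow(mag, origin='lower', extent=[1, 2, -1, 0]); plt.show()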
""" TODO """ __author__ = 'Chris Jones, Kenneth Love' __version__ = '0.8.5' __license__ = 'MIT' import re from flask import Flask, render_template, redirect, url_for, request, flash from flask import session, jsonify from flaskext.wtf import Form, TextField, Required, IntegerField, FloatField import redis from redis.exceptions import ConnectionError, ResponseError import settings SECRET_KEY = '781b0650af13493089a6ffafac755a61' app = Flask(__name__) app.config.from_object(__name__) app.debug = True # Forms class KeyForm(Form): key_name = TextField('Key', validators=[Required()]) key_ttl = IntegerField('TTL') class StringForm(KeyForm): key_value = TextField('Value', validators=[Required()]) class ListSetForm(KeyForm): """ Form for creating a new set or list """ member = TextField('Member', validators=[Required()]) class HashForm(KeyForm): """ Form for creating a hash. """ member_name = TextField('Member', validators=[Required()]) member_value = TextField('Value', validators=[Required()]) class ZSetForm(KeyForm): """ Form for creating a ZSet """ member_name = TextField('Member', validators=[Required()]) member_score = FloatField('Score', validators=[Required()]) # Context processors @app.context_processor def get_db_size(): r = get_redis_connection(session) if not r: return {'db_size':0} return dict(db_size=r.dbsize()) # Control app flow def get_redis_connection(session): """ Get Redis connection with session values. Ping Redis to make sure connection is working. """ r = redis.Redis( host=session.get('redis_host', settings.REDIS_HOST), port=session.get('redis_port', settings.REDIS_PORT), db=session.get('redis_db', settings.REDIS_DB), password=session.get('redis_password', '')) try: r.ping() except (ConnectionError, ResponseError): return None return r def set_session_defaults(session): """ Setup default session """ session['redis_db'] = settings.REDIS_DB session['redis_host'] = settings.REDIS_HOST session['redis_port'] = settings.REDIS_PORT @app.route('/logout/') def logout(): if session: for key in session.keys(): session.pop(key) return redirect(url_for('index')) @app.route('/change_db', methods=['GET', 'POST']) def change_db(): """ View to handle changing the redis db. Make sure val is an int and within the redis db range. """ if request.method == 'POST': try: db = int(request.form['redis_db']) except ValueError: return redirect(url_for('index')) if db in xrange(0,10): session['redis_db'] = db flash('Redis DB changed to ' + str(db)) return redirect(request.referrer) @app.route('/setup/', methods=['GET', 'POST']) def setup(): """ If a connection error with Redis occurs, users will be redirected here to setup the connection information. """ if request.method == 'POST': host = request.form['host'] or settings.REDIS_HOST password = request.form['password'] try: port = int(request.form['port']) except ValueError: port = settings.REDIS_PORT flash('Port number must be an integer. Default used.') session['redis_host'] = host session['redis_port'] = port session['redis_password'] = password return redirect(url_for('index')) return render_template('setup.html') # Static views @app.route('/info/') def info(): """ View for info about your redis set up. """ r = get_redis_connection(session) if not r: return redirect(url_for('setup')) info = r.info().items() return render_template('info.html', info=info) @app.route('/') def index(): """ All available keys. 
""" if not session.has_key('redis_db'): set_session_defaults(session) r = get_redis_connection(session) if not r: return redirect(url_for('setup')) return render_template('index.html') @app.route('/keys') @app.route('/keys/<int:amount>') def keys(amount=500): """ Get available keys. """ if not session.has_key('redis_db'): set_session_defaults(session) r = get_redis_connection(session) if not r: return redirect(url_for('setup')) total_keys = len(r.keys()) initial_keys = r.keys()[:amount] return jsonify(total_keys=total_keys, initial_keys=initial_keys) @app.route('/key/<key>') def key(key): """ Info for the key. """ r = get_redis_connection(session) if not r: return redirect(url_for('setup')) if r.exists(key): rtype = r.type(key) if rtype == 'hash': output = r.hgetall(key) elif rtype == 'set': output = r.smembers(key) elif rtype == 'zset': output = r.zrange(key, 0, -1, withscores=True) elif rtype == 'list': output = [r.lindex(key, n) for n in xrange(r.llen(key))] else: output = r.get(key) return render_template('key.html', rtype=rtype, key=key, output=output, ttl=r.ttl(key)) else: return render_template('no_key.html', key=key) # Read/write views @app.route('/key/save/<key>', methods=['POST']) def save(key): """ Update the value of a key. """ r = get_redis_connection(session) if not r: return redirect(url_for('setup')) rtype = r.type(key) value = request.form['value'] if rtype == 'hash': value = request.form['value'].strip("{}") values = [k.split(':', 1) for k in value.split(',')] r.delete(key) for k, v in values: r.hset(key, k.strip("' "), v.strip("' ")) elif rtype == 'set': value = request.form['value'].strip("set([])") r.delete(key) for k in value.split(','): r.sadd(key, k.strip(" '\"")) elif rtype == 'list': value = request.form['value'].strip("[]") r.delete(key) for k in value.split(','): r.rpush(key, k.strip().strip("'")) elif rtype == 'zset': value = request.form['value'].strip('[]') regex = re.compile('(?P<key>\(.*\))(?P<comma>,\s)(?P<value>\(.*\))') matches = re.search(regex, value) values = [match for match in matches.groups() if match != ', '] values_list = [k.split() for k in values] r.delete(key) for k, v in values_list: k, v = k.strip("(' ,)"), v.strip("(' ,)") r.zadd(key, k, v) elif rtype == 'string': r.set(key, value) key_changed = 'false' if request.form['key_name'] != request.form['saved_key_name']: r.rename(key, request.form['key_name']) key = request.form['key_name'] key_changed = 'true' return jsonify( flash=key + ' was saved successfully', value=value, key=key, key_changed=key_changed ) @app.route('/key/new/string', methods=['GET', 'POST']) def new_string(): form = StringForm(request.form or None) if form.validate_on_submit(): key = request.form['key_name'] value = request.form['key_value'] ttl = int(request.form['key_ttl']) r = get_redis_connection(session) if not r: return redirect(url_for('setup')) if r.exists(key): flash('%s already exists, edit it below' % key) return redirect('#%s' % key) try: r.set(key, value) if ttl and ttl != 0: r.expire(key, ttl) flash('%s was saved successfully.' 
% key) return redirect('#%s' % key) except: return jsonify(flash=key + ' failed to save.') else: return render_template('new_string.html', form=form) @app.route('/key/new/set', methods=['GET', 'POST']) def new_set(): """ View for creating a new set/member """ form = ListSetForm(request.form or None) if form.validate_on_submit(): key = request.form['key_name'] ttl = int(request.form['key_ttl']) r = get_redis_connection(session) if not r: return redirect(url_for('setup')) for m in [k for k in request.form.keys() if k.startswith('member')]: r.sadd(key, request.form[m]) if ttl and ttl != 0: r.expire(key, ttl) flash('%s was created.' % key) return redirect('#%s' % key) return render_template('new_set.html', form=form) @app.route('/key/new/list', methods=['GET', 'POST']) def new_list(): """ View for creating a new list with members. """ form = ListSetForm(request.form or None) if form.validate_on_submit(): key = request.form['key_name'] ttl = int(request.form['key_ttl']) r = get_redis_connection(session) if not r: return redirect(url_for('setup')) for m in [k for k in request.form.keys() if k.startswith('member')]: r.rpush(key, request.form[m]) if ttl and ttl != 0: r.expire(key, ttl) flash('%s was created.' % key) return redirect('#%s' % key) return render_template('new_list.html', form=form) @app.route('/key/new/hash', methods=['GET', 'POST']) def new_hash(): """ View for creating a new list with members. """ form = HashForm(request.form or None) if form.validate_on_submit(): key = request.form['key_name'] ttl = int(request.form['key_ttl']) r = get_redis_connection(session) if not r: return redirect(url_for('setup')) for m in [k for k in request.form.keys() if k.startswith('member_name')]: v = re.sub('name', 'value', m) r.hset(key, request.form[m], request.form[v]) if ttl and ttl != 0: r.expire(key, ttl) flash('%s was created.' % key) return redirect('#%s' % key) return render_template('new_hash.html', form=form) @app.route('/key/new/zset', methods=['GET', 'POST']) def new_zset(): """ View for creating a new zset """ form = ZSetForm(request.form or None) if form.validate_on_submit(): key = request.form['key_name'] ttl = int(request.form['key_ttl']) r = get_redis_connection(session) if not r: return redirect(url_for('setup')) for m in [k for k in request.form.keys() if k.startswith('member_name')]: v = re.sub('name', 'score', m) r.zadd(key, request.form[m], float(request.form[v])) if ttl and ttl != 0: r.expire(key, ttl) flash('%s was created.' % key) return redirect('#%s' % key) return render_template('new_zset.html', form=form) @app.route('/key/delete/<key>', methods=['GET']) def delete(key): """ Delete key """ r = get_redis_connection(session) if not r: return redirect(url_for('setup')) if r.exists(key): r.delete(key) return jsonify(flash="Key '" + key + "' was deleted successfully") else: return jsonify(flash="Key '" + key + "' was not found in Redis") @app.route('/search/<string>', methods=['GET']) @app.route('/search/', methods=['GET']) def search(string=None): """ Find keys matching a string. """ r = get_redis_connection(session) if not r: return redirect(url_for('setup')) if string: search_string = '*' + string + '*' else: search_string = '*' return jsonify(keys=r.keys(pattern=search_string)) if __name__ == '__main__': app.run()
""" Astra-Viso world object module. """ from __future__ import division import numpy as np from scipy.integrate import ode from astraviso import pointingutils class WorldObject: """ World object class. """ def __init__(self, name="Object1"): """ WorldObject class initialization. Parameters ---------- name : str, optional User-defined object name. Returns ------- object : WorldObject() Default WorldObject instance. Examples -------- >>> obj = WorldObject() """ # Allocate settings variable self.__settings = {} # Name self.__settings["name"] = name # Object name # Time self.__settings["epoch"] = "1 Jan 2017 00:00:00.000" #self.__settings["epoch_format"] = "seconds" # Type of epoch # Attitude dynamics self.pointing_fcn = None self.set_pointing_preset("kinematic", initial_quaternion=np.array([0, 0, 0, 1]), \ initial_angular_rate=np.array([0, 0, 0])) # Position dynamics self.position_fcn = None self.set_position_preset("kinematic", initial_position=np.array([0, 0, 1]), \ initial_velocity=np.array([0, 0, 0])) # Visible intensity function self.vismag_fcn = None self.set_vismag_preset("constant", vismag=-1) def set_pointing_fcn(self, fcn, mode, initial_state=None, integrator="dopri5", **ode_args): """ Set internal pointing dynamics. Accepts both ODEs and explicit functions of time. Any number of states are allowed as long as the first four elements correspond to the quaternion attitude parameterization. The scalar component of the quaternion should be the fourth element. Parameters ---------- fcn : function Input pointing function. mode : str String descripting the type of function input. Options are "ode" and "explicit". initial_state : ndarray, optional Optional array describing the initial pointing state. Required for "ode" mode only. integrator : str, optional Integrator to use for "ode" mode. Default is "dopri5". See documentation for scipy.integrate.ode for valid settings. Returns ------- None See Also -------- WorldObject.set_pointing_preset, WorldObject.get_pointing Notes ----- For "ode" mode, any keyword arguments after "integrator" will pass directly into the ode.set_integrator. See scipy.integrate.ode for valid settings. Examples -------- >>> obj = WorldObject() >>> fcn = lambda t, state: [0, 0, 0, 0, 0, 0, 0] >>> obj.set_pointing_fcn(fcn, "ode", np.array([0, 0, 0, 1, 0, 0, 0])) >>> obj.get_pointing(1) array([ 0., 0., 0., 1.]) """ # Verify input mode if mode.lower() not in ["ode", "explicit"]: raise ValueError("Invalid pointing mode:" + mode) # Handle ODE option if mode.lower() == "ode": # Define defaults, import user settings kwargs = {"atol" : 1e-9, \ "rtol" : 1e-9, \ "max_step" : 1e-3, \ "nsteps" : 1e8} kwargs.update(ode_args) # Set up integrator and store ode ode_fcn = ode(fcn) ode_fcn.set_integrator(integrator, **kwargs) ode_fcn.set_initial_value(initial_state, 0) pointing_fcn = ode_fcn.integrate # Handle explicit option elif mode.lower() == "explicit": # Set function pointing_fcn = fcn # Set internal pointing function self.pointing_fcn = pointing_fcn def set_pointing_preset(self, preset, **kwargs): """ Set internal pointing dynamics to pre-defined attitude function. Current options are: "static" -- static pointing direction "kinematic" -- rigidy-body kinematic motion with a constant angular rate. Parameters ---------- preset : str Name of chosen preset. initial_state : ndarray, optional Optional 7-element array describing the initial pointing state. Required as keyword argument for the "kinematic" preset. 
initial_quaternion : ndarray, optional Array (4 elements) describing the initial quaternion. Required as a keyword argument for the "static" and "kinematic" presets. initial_angular_rate : ndarray, optional Array (3 elements) describing the initial angular rate of the object. Measured in radians per second. Required as a keyword argument for the "kinematic" preset. Returns ------- None See Also -------- WorldObject.set_pointing_fcn, WorldObject.get_pointing Notes ----- Uses default integrator values. For more fine-grained control, use WorldObject.set_pointing_fcn. Examples -------- >>> obj = WorldObject() >>> obj.set_pointing_preset("kinematic", ... initial_quaternion=np.array([0, 0, 0, 1]), ... initial_angular_rate=np.array([0, 0, 0])) """ # Handle static option if preset.lower() == "static": # Check for missing input if "initial_quaternion" not in kwargs: raise ValueError("Must provide the following keyword arguments for this preset: \ 'initial_quaternion'") # Build lambda function function = lambda t: kwargs["initial_quaternion"] # Set function self.set_pointing_fcn(function, "explicit") # Rigid body kinematic option elif preset.lower() == "kinematic": # Check for missing input if "initial_quaternion" not in kwargs or "initial_angular_rate" not in kwargs: raise ValueError("Must provide the following keyword arguments for this preset: \ 'initial_quaternion', initial_angular_rate") # Build lambda function function = lambda t, state: pointingutils.rigid_body_kinematic(state[0:4], state[4:]) # Set function initial_state=np.hstack((kwargs["initial_quaternion"], kwargs["initial_angular_rate"])) self.set_pointing_fcn(function, "ode", initial_state) # Handle invalid preset else: raise NotImplementedError("Selected preset not supported.") def get_pointing(self, time, mode="quaternion"): """ Get pointing parameters at a given time. Supports both quaternion and direction-cosine-matrix parameterizations. Parameters ---------- time : float or ndarray Desired time(s) to extract pointing information. mode : str, optional Desired output parameterization. Supports "quaternion" and "dcm". Default is "dcm". Returns ------- pointing : ndarray Array containing pointing data corresponding to each input time. For quaternion output, the array is Nx4 where N is the number of times requested. For DCM output, the array is Nx3x3. See Also -------- WorldObject.set_pointing_fcn, WorldObject.set_pointing_preset Examples -------- >>> obj = WorldObject() >>> obj.set_pointing_preset("kinematic", np.array([0,0,0,1,0,0,0])) >>> obj.get_pointing(1) array([ 0., 0., 0., 1.]) """ # Ensure that time is a list if not isinstance(time, list) and not isinstance(time, np.ndarray): time = [time] num = len(time) # Collect pointing values for quaternion mode if mode == "quaternion": # Allocate output = np.zeros((num, 4)) # Iterate for idx in range(num): output[idx, :] = self.pointing_fcn(time[idx])[0:4] # Collect pointing values for dcm mode elif mode == "dcm": # Allocate output = np.zeros((num, 3, 3)) # Iterate for idx in range(num): output[idx, :, :] = pointingutils.quaternion2dcm(self.pointing_fcn(time[idx])[0:4]) # Handle invalid mode else: raise NotImplementedError("Unsupported pointing type. Options: quaternion, dcm.") # Remove unnecessary dimension if num == 1: output = np.squeeze(output) # Return values return output def set_position_fcn(self, fcn, mode, initial_state=None, integrator="dopri5", **ode_args): """ Set internal position dynamics. Accepts both ODEs and explicit functions of time. 
Any number of states are allowed as long as the first three elements correspond to the intertial position. Parameters ---------- fcn : function Input position function. Function must be of the form f(t, state) where "state" is a vector. mode : str String descripting the type of function input. Options are "ode" and "explicit". initial_state : ndarray, optional Optional array describing the initial state. Required for "ode" mode only. integrator : str, optional Integrator to use for "ode" mode. Default is "dopri5". See documentation for scipy.integrate.ode for valid settings. Returns ------- None See Also -------- WorldObject.set_position_preset, WorldObject.get_position Notes ----- For "ode" mode, any keyword arguments after "integrator" will pass directly into the ode.set_integrator. See scipy.integrate.ode for valid settings. Examples -------- >>> obj = WorldObject() >>> fcn = lambda t, state: [0, 0, 0] >>> obj.set_position_fcn(fcn, "ode", np.array([1, 1, 1])) >>> obj.get_position(1) array([ 1., 1., 1.]) """ # Check for valid input if not callable(fcn): raise ValueError("Must provide callable function.") # Handle ODE option if mode.lower() == "ode": # Define defaults, import user settings kwargs = {"atol" : 1e-9, \ "rtol" : 1e-9, \ "max_step" : 1e-3, \ "nsteps" : 1e8} kwargs.update(ode_args) # Set up integrator and store ode ode_fcn = ode(fcn) ode_fcn.set_integrator(integrator, **kwargs) ode_fcn.set_initial_value(initial_state, 0) position_fcn = ode_fcn.integrate # Handle explicit option elif mode.lower() == "explicit": # Set function position_fcn = fcn # Handle invalid option else: raise ValueError("Invalid position mode:" + mode) # Set internal pointing function self.position_fcn = position_fcn def set_position_preset(self, preset, **kwargs): """ Set internal position dynamics to preset function. Current options are: "static" -- A constant position function. "kinematic" -- simple kinematic motion from an initial position and velocity. Parameters ---------- preset : str Name of chosen preset. initial_position : ndarray, optional Initial object position. Required as keyword argument for the "kinematic" preset. initial_velocity : ndarray, optional Initial object position. Required as keyword argument for the "kinematic" preset. Returns ------- None See Also -------- WorldObject.set_position_fcn, WorldObject.get_position Examples -------- >>> obj = WorldObject() >>> obj.set_position_preset("kinematic", ... initial_position=np.ndarray([0, 0, 0]), ... initial_velocity=np.ndarray([0, 0, 0]) >>> obj.position_fcn(1) array([0, 0, 0]) """ # Set static option if preset.lower() == "static": # Check input if "initial_position" not in kwargs: raise ValueError("Must provide the following keyword arguments for this preset: \ 'initial_position'") # Build function & set position_fcn = lambda t: kwargs["initial_position"] self.set_position_fcn(position_fcn, mode="explicit") # Set kinematic option elif preset.lower() == "kinematic": # Check input if "initial_position" not in kwargs or "initial_velocity" not in kwargs: raise ValueError("Must provide the following keyword arguments for this preset: \ 'initial_position', 'initial_velocity'") # Build function & set position_fcn = lambda t: kwargs["initial_position"] + kwargs["initial_velocity"]*t self.set_position_fcn(position_fcn, mode="explicit") # Handle invalid option else: raise NotImplementedError("Invalid preset option.") def get_position(self, time): """ Get object position at a particular time. 
Parameters ---------- time : float or ndarray Desired time to extract position information. Returns ------- position : ndarray Position vector of the WorldObject at given time in intertial space. See Also -------- WorldObject.set_position_fcn, WorldObject.set_position_preset Examples -------- >>> obj = WorldObject() >>> obj.set_position_preset("constant", np.array([0, 0, 0])) >>> obj.get_position(1) array([0, 0, 0]) """ # Compute position return self.position_fcn(time) def set_vismag_fcn(self, fcn): """ Set internal object visual magnitude model. Parameters ---------- fcn : function Input visual magnitude function. Output must be scalar. See notes for details about the required function format. Returns ------- None See Also -------- WorldObject.set_vismag_preset, WorldObject.get_vismag Notes ----- Function must be of the form: vismag = f(t, observer_position, object_position) Below are two valid function definition templates. def user_fcn(t, observer_position, object_position): ... return vismag user_fcn = lambda t, observer_position, object_position: ... Examples -------- >>> obj = WorldObject() >>> fcn = lambda t, *_: 7 + 2*np.sin(2*np.pi*t/30) # Ignore args after t >>> obj.set_vismag_fcn(fcn) >>> obj.vismag_fcn(0) 7.0 >>> obj.vismag_fcn(7.5) 9.0 """ # Check for valid input if not callable(fcn): raise ValueError("Must provide callable function.") # Set function self.vismag_fcn = fcn def set_vismag_preset(self, preset, **kwargs): """ Set internal visual magnitude model to preset function. Available presets are: "constant" -- static user-defined visual magnitude. "sine" -- sinusoidal visual magnitude. Function of the form: vismag + amplitude*np.sin(2*np.pi*t/frequency) Parameters ---------- preset : str Name of chosen preset. vismag : float, optional Object visual magnitude. Argument required as keyword for "constant" and "sine" preset options. amplitude : float, optional Visual magnitude oscillation amplitude. Argument required as keyword for "sine" preset. frequency : float, optional Visual magnitude oscillation frequency. Measured in seconds. Argument required as keyword for "sine" preset. Returns ------- None See Also -------- WorldObject.set_vismag_fcn, WorldObject.get_vismag Examples -------- >>> obj = WorldObject() >>> obj.set_vismag_preset("sine", vismag=7, amplitude=2, frequency=30) >>> obj.vismag_fcn(0) 7.0 >>> obj.vismag_fcn(7.5) 9.0 """ # Set constant option if preset.lower() == "constant": # Check input if "vismag" not in kwargs: raise ValueError("Must provide the following keyword arguments for this preset: \ 'vismag'") # Build function & set vismag_fcn = lambda *_: kwargs["vismag"] self.set_vismag_fcn(vismag_fcn) # Set sine option elif preset.lower() == "sine": # Check input if any([ele not in kwargs for ele in ["vismag", "amplitude", "frequency"]]): raise ValueError("Must provide the following keyword arguments for this preset: \ 'vismag', 'amplitude', 'frequency'.") # Build function & set vismag_fcn = lambda t, *_: kwargs["vismag"] + \ kwargs["amplitude"]*np.sin(2*np.pi*t/kwargs["frequency"]) self.set_vismag_fcn(vismag_fcn) # Handle invalid option else: raise NotImplementedError("Invalid preset option.") def get_vismag(self, time, observer_position): """ Get visible magnitude at a particular time. Parameters ---------- time : float or ndarray Desired time to extract visual magnitude information. observer_position : ndarray Array describing the position of the observer in intertial space. Returns ------- vismag : float Visual magnitude of the WorldObject at the given time. 
See Also -------- WorldObject.set_vismag_fcn, WorldObject.set_vismag_preset Examples -------- >>> obj = WorldObject() >>> obj.set_vismag_preset("sine", vismag=7, amplitude=2, frequency=30) >>> obj.get_vismag(0) 7.0 >>> obj.get_vismag(7.5) 9.0 """ # Compute visual magnitude return self.vismag_fcn(time, observer_position, self.get_position(time)) def relative_to(self, origin_object, time): """ Compute relative position from another WorldObject to self at a given time. Parameters ---------- origin_object : WorldObject Object to compute the relative position from. Returns ------- relative_position : ndarray Array (3-elements) describing the relative position from origin_object to self. See Also -------- WorldObject.set_position_fcn, WorldObject.set_position_preset, WorldObject.get_position Examples -------- >>> obj = WorldObject() >>> obj2 = WorldObject() >>> obj.relative_to(obj2, 0) array([0, 0, 0]) """ # Compute relative position return self.get_position(time) - origin_object.get_position(time) def in_frame_of(self, origin_object, time): """ Compute relative position from another WorldObject to self at a given time in the body frame of the target object. Parameters ---------- origin_object : WorldObject Object to compute the relative position from. Reference frame of the result is the origin_object's body frame. Returns ------- relative_position : ndarray Array (3-elements) describing the relative position from origin_object to self. See Also -------- WorldObject.set_position_fcn, WorldObject.set_position_preset, WorldObject.get_position Examples -------- >>> obj = WorldObject() >>> obj2 = WorldObject() >>> obj.in_frame_of(obj2) array([0, 0, 0]) """ # Compute relative position rel_pos = self.get_position(time) - origin_object.get_position(time) # Rotate into body frame return np.dot(rel_pos, origin_object.get_pointing(time, mode="dcm"))
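# Editor's note -- a short, hedged usage sketch of the WorldObject API documented
# above. The object names and numeric values are illustrative only.
if __name__ == "__main__":
    chaser = WorldObject("Chaser")
    target = WorldObject("Target")

    # Constant drift for the target, slow spin about z for the chaser.
    target.set_position_preset("kinematic",
                               initial_position=np.array([0, 0, 10]),
                               initial_velocity=np.array([0.1, 0, 0]))
    chaser.set_pointing_preset("kinematic",
                               initial_quaternion=np.array([0, 0, 0, 1]),
                               initial_angular_rate=np.array([0, 0, 0.01]))

    # Attitude, position, and relative geometry at t = 5 s.
    print(chaser.get_pointing(5))         # quaternion, shape (4,)
    print(target.get_position(5))         # inertial position, shape (3,)
    print(target.relative_to(chaser, 5))  # vector from chaser to target
    print(target.in_frame_of(chaser, 5))  # same vector, in the chaser body frame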
# -*- coding: utf-8 -*- """modelgrid.py - Deprecated code for interpolating regular or irregular grids of models. Use `scipy.interpolate.RegularGridInterpolator` or `scipy.interpolate.LinearNDInterpolator`. Kept here since it is used by some other (old) packages of mine. """ import numpy as np from scipy.spatial import Delaunay try: import astropy.io.fits as pyfits except (ImportError): import pyfits try: import sklearn.neighbors except(ImportError): pass try: from . import observate except (ImportError): print('Warning - observate not imported, SpecLibrary class unavailable') class ModelLibrary(object): """Class to deal with (irregular) grids of models. Primary attribute is `pars`: a structured array of parameter values of shape (ngrid). Methods are provided to manipulate the parameter structure, and to obtain interpolation weights for specific points within the grid using a variety of methods. """ # force generation of the DT and KDtree in first pass triangle_dirtiness = 1 def __init__(self, pars=None, parnames=None): if pars is not None: self.set_pars(pars, parnames) def set_pars(self, pars, parnames): self.pars = self.structure_array(pars, parnames) self.ngrid = self.pars.shape[0] def add_par(self, value, name, dtype='<f8'): newp = self.structure_array(value, [name], types=[dtype]) self.pars = self.join_struct_arrays([self.pars, newp]) pass def par_names(self): return self.pars.dtype.names def par_range(self, parname, inds=None): range_list = [(np.nanmin(self.pars[inds][p]), np.nanmax(self.pars[inds][p]), p) for p in parname] return range_list def structure_array(self, values, fieldnames, types=None): """Turn a numpy array of floats into a structurd array. :param values: ndarray, shape(nrec, nfield). Values of the structure. :param fieldnames: string sequence of length nfield. A list or string array of parameter names with length NFIELD. :param types: string sequence, optional Format identifiers for each field. Defaults to '<f8'. :returns struct: A numpy structured array. """ # shape coercion values = np.atleast_2d(values) if values.shape[-1] != len(fieldnames): if values.shape[0] == len(fieldnames): values = values.T else: raise ValueError('array and fieldnames do not have consistent shapes!') nobj = values.shape[0] # create the dtype and structured array if types is None: types = ['<f8'] * len(fieldnames) dt = np.dtype(zip(fieldnames, types)) struct = np.zeros(nobj, dtype=dt) for i, f in enumerate(fieldnames): struct[f] = values[..., i] return struct def join_struct_arrays(self, arrays): """From some dudes on StackOverflow. Add equal length structured arrays to produce a single structure with fields from both. :param arrays: A sequence of structured arrays. These must each have the same length. :returns newarray: A single array containing the fields of ``arrays``. """ if False in [len(a) == len(arrays[0]) for a in arrays]: raise ValueError('array lengths do not match.') newdtype = np.dtype(sum((a.dtype.descr for a in arrays), [])) if len(np.unique(newdtype.names)) != len(newdtype.names): raise ValueError('arrays have duplicate fields.') newrecarray = np.empty(len(arrays[0]), dtype=newdtype) for a in arrays: for name in a.dtype.names: newrecarray[name] = a[name] return newrecarray def model_weights(self, target_points, parnames=None, subinds=None, itype='dt', force_triangulation=False, **extras): """Given an array of target coordinates and optionally a list of parnames, return the indices and weights of the model grid points that will interpolate to the target points. 
:param target_points: ndarray, shape(ntarg, npar) The points to which you want to interpolate. Can be a numpy structured array of length NTARG or a simple numpy array of dimension [NTARG x NPAR], if parnames is also passed. If a numpy structured array, then the field names should correspond to parameters in the `par` structure. 1d arrays (i.e. a single target point) will be automatically upgraded to 2d arrays :param parnames: string sequence A list of the parameter names corresponding to the NPAR dimension of target_points. These names should correspond to parameters in the `par` structure. :param subinds: An optional array identifying the members of the grid to be used for interpolation. :param itype: string (default 'dt') A string giving the type of interpolation to perform. Available options are: * 'dt' - use Delaunay triangulation. This is suitable for irregular grids, though care must be taken that target points are within the convex hull described by the grid * 'idw' - inverse distance weighting to the NPAR nearest neighbors. * 'nearest' - nearest neighbor, as determined by a kd-tree * 'ndlinear' - not yet implemented' :param force_triangulation: bool (default True) Normally the triangulation and kdtree formed from the model grid is stored and reused unless the `triangle_dirtiness' attribute is greater than zero. If this keyword is true, force the triangulation and kdtree to be regenerated regardless of 'dirtiness' :returns inds: ndarray, shape(ntarg, nind) An array that indexes the model parameter grid to give the interpolating models for each target point. It has shape (NTARG, NIND) where nind is NPAR+1 for Delaunay triangulation, NPAR for 'idw', and 1 for 'nearest' :returns weights: ndarray, shape(ntarg, nind) The interpolation weight for each model grid point specified by inds, and of identical shape. The weights summed along the NIND direction will always be 1. Thus for 'nearest' weights is always 1. """ # deal with recarray input if parnames is None: parnames = target_points.dtype.names targets = np.array(target_points.tolist()) else: targets = target_points targets = np.atleast_2d(targets) # If necessary rebuild the model delauynay triangulation and # KDTree. Pull the grid points out of the model record data # and make an (nmodel,ndim) array of model grid parameter # values. Need to loop to make sure order is correct. # if subinds is not None: # force_triangulation = True # print( subinds) self.triangle_dirtiness += force_triangulation if self.triangle_dirtiness > 0: self.refresh_graphs(parnames, subinds=subinds) # pass the result to weightsDT if itype.lower() == 'dt': inds, weights = self.weightsDT(targets) elif itype.lower() == 'idw': inds, weights = self.weights_kNN_inverse_dist(targets, k=targets.shape[1]) elif itype.lower() == 'nearest': inds, weights = self.weights_kNN_inverse_dist(targets, k=1) return inds, weights def refresh_graphs(self, parnames, subinds=None): """Given parameter names and optionally specific model indices, build a a Delaunay triangulation and a KDTree for the model points. 
""" model_points = [np.squeeze(self.pars[subinds][pname]) for pname in parnames] model_points = np.array(model_points).transpose() # (nmod, ndim) self.graphed_parameters = parnames self.triangle_dirtiness = 0 # Delaunay triangulate self.dtri = Delaunay(model_points) # kdtree try: self.kdt = sklearn.neighbors.KDTree(model_points) except(NameError): pass def weightsDT(self, target_points): """The interpolation weights are determined from barycenter coordinates of the vertices of the enclosing Delaunay triangulation simplex. This allows for the use of irregular Nd grids. See also weights_1DLinear and weights_kNN_inverse_distance. :param target_points: ndarray, shape(ntarg,npar) The coordinates to which you wish to interpolate. :returns inds: ndarray, shape(ntarg,npar+1) The model indices of the interpolates. :returns weights: narray, shape (ntarg,npar+1) The weights of each model given by ind in the interpolates. """ # Find the encompassing (hyper)triangle(s) for the desired # points, given a delauynay triangulation ndim = target_points.shape[-1] # Output triangle_inds is an (ntarg) array of simplex indices triangle_inds = self.dtri.find_simplex(target_points) # And get model indices of (hyper)triangle vertices. inds has # shape (ntarg,ndim+1) inds = self.dtri.vertices[triangle_inds, :] # get the barycenter coordinates through matrix multiplication # and dimensional juggling tmp = self.dtri.transform[triangle_inds, ndim, :] bary = np.dot(self.dtri.transform[triangle_inds, :ndim, :ndim], (target_points - tmp).reshape(-1, ndim, 1)) oned = np.arange(triangle_inds.shape[0]) # ok. in np 1.7 can add an axis to squeeze bary = np.atleast_2d(np.squeeze(bary[oned, :, oned, :])) # the last bary coordinate is 1-sum of the other coordinates last = 1 - bary.sum(axis=-1) weights = np.hstack((bary, last[:, np.newaxis])) outside = (triangle_inds == -1) weights[outside, :] = 0 return inds, weights # loop implementation of the above for clarity # npts = triangle_inds.shape[0] # bary = np.zeros([npts,ndim+1]) # for i in xrange(npts): # bary[i,:-1]= np.dot( dtri.transform[triangle_inds[0],:ndim,:ndim], # (target_points-dtri.transform[triangle_inds[i],ndim,:]) def weights_kNN_inverse_dist(self, target_points, k=1): """The interpolation weights are determined from the inverse distance to the k nearest neighbors. :param target_points: ndarray, shape(ntarg,npar) The coordinates to which you wish to interpolate. :param k: The number of nearest neighbors to use. :returns inds: ndarray, shape(ntarg,npar+1) The model indices of the interpolates. :returns weights: narray, shape (ntarg,npar+1) The weights of each model given by ind in the interpolates. """ dists, inds = self.kdt.query(target_points, k=k, return_distance=True) if k == 1: return inds, np.ones(inds.shape) weights = 1 / dists weights[np.isinf(weights)] = large_number weights = weights/weights.sum(axis=-1) return inds, weights def weights_1DLinear(self, model_points, target_points, extrapolate=False, left=0.0, right=0.0): """The interpolation weights are determined from 1D linear interpolation. :param model_points: ndarray, shape(nmod) The parameter coordinate of the available models :param target_points: ndarray, shape(ntarg) The coordinate to which you wish to interpolate :returns inds: ndarray, shape(ntarg,2) The model indices of the interpolates :returns weights: narray, shape (ntarg,2) The weights of each model given by ind in the interpolates. 
""" order = model_points.argsort() mod_sorted = model_points[order] x_new_indices = np.searchsorted(mod_sorted, target_points) x_new_indices = x_new_indices.clip(1, len(mod_sorted) - 1).astype(int) lo = x_new_indices - 1 hi = x_new_indices x_lo = mod_sorted[lo] x_hi = mod_sorted[hi] width = x_hi - x_lo w_lo = (x_hi - target_points) / width w_hi = (target_points - x_lo) / width if extrapolate is False: # find places where target is above or below the model # range. above_scale = w_lo < 0 below_scale = w_hi < 0 # set the indices to be indentical in these cases lo[above_scale] = hi[above_scale] hi[below_scale] = lo[below_scale] # make the combined weights sum to ``left`` or ``right`` w_lo[above_scale] = 0 w_hi[above_scale] = left w_hi[below_scale] = 0 w_lo[below_scale] = right inds = np.vstack([lo, hi]).T weights = np.vstack([w_lo, w_hi]).T return inds, weights def nearest_index(self, array, value): return (np.abs(array-value)).argmin(axis=-1) class SpecLibrary(ModelLibrary): """Class to operate on spectral libraries. Methods are provided to interpolate the available model spectra (stored as a structured parameter array and a spectral array) to a certain set of parameters. Subclasses are used to return the actual model spectrum given a set of model parameters. Primary attributes are pars, spectra, wavelength. Spectra should be of shape (NOBJ x NWAVE) """ flux_unit = 'erg/s/cm^2/AA of 1solar mass at 10pc' def __init__(self): pass def spectra_from_pars(self): """This should take a numpy structured array of parameters of length nobj and return an (nobj, nwave) array of spectra """ raise(NotImplementedError) def generateSEDs(self, pars, filterlist, wave_min=90, wave_max=1e7, keepspec=False, intspec=False, attenuator=None, **extras): """ :returns sed: ndarray of shape (nobj,nfilter) :returns lbol: ndarray of shape (nobj) :returns outspectra: ndarray of shape (nobj,nwave) """ # don't use too much memory at once maxmod = 1e7/self.wavelength.shape[0] ngrid = pars.shape[0] sed = np.zeros([int(ngrid), len(filterlist)]) lbol = np.zeros(int(ngrid)) outspectra = None if keepspec: outspectra = np.zeros([ngrid, self.wavelength.shape[0]]) elif intspec: outspectra = np.zeros(self.wavelength.shape[0]) # split big model grids to avoid memory constraints i = 0 while (i*maxmod <= ngrid): s1, s2 = int((i)*maxmod), int(np.min([(i+1)*maxmod-1, ngrid])) spec = self.spectra_from_pars(pars[int(s1):int(s2)], **extras) if attenuator is not None: spec = attenuator.attenuate_spectrum(self.wavelength, spec, pars[s1:s2], **extras) sed[s1:s2, :] = observate.getSED(self.wavelength, spec, filterlist) lbol[s1:s2] = observate.Lbol(self.wavelength, spec, wave_min=wave_min, wave_max=wave_max) i += 1 if keepspec: outspectra[s1:s2, :] = spec elif intspec: outspectra += spec.sum(axis=0) return sed, lbol, outspectra def interpolate_to_pars(self, target_points, parnames=None, subinds=None, itype='dt', **extras): """Method to obtain the model spectrum for a given set of parameter values via interpolation of the model grid. The interpolation weights are determined from barycenters of a Delaunay triangulation or nLinear interpolation or k-nearest-neighbor inverse distance. :param target_points: ndarray, shape (ntarg,ndim) Desired model parameters. Can also be a structured array with fields named for the model parameters. See ModelLibrary.model_weights() :param subinds: ndarray Indices of the model pars structure to use in interpolation. Allows for only portions of the model library to be used. 
:param parnames: string sequence, len (ndim) The names of the model library parameters :returns spectra: ndarray, shape (ntarg, nwave) The interpolated spectra. """ inds, weights = self.model_weights(target_points, parnames=parnames, itype=itype, subinds=subinds, **extras) if subinds is not None: inds = subinds[inds] return self.combine_weighted_spectra(inds, weights) def combine_weighted_spectra(self, inds, weights): """Weight self.spectra using broadcasting, then sum the weighted spectra. Should add a switch to sum the weights first so that the output is a single spectrum. :param inds: shape (ntarg,nint) Indices of the models to sum :param weights: shape (ntarg, nint) Weights of the models corresponding to inds. :returns spec: shape (nwave,ntarg). The weighted sum. """ tmp = weights * (self.spectra[inds].transpose(2, 0, 1)) return (tmp.sum(axis=2)).T def read_model_from_fitsbinary(self, filename, parnames, wavename='WAVE', fluxname='F_LAMBDA'): """Read spectra from a fits binary table in a certain format. """ # if os.ispath(filename) is False: # raise IOError('read_model_from_fitsbinary: # ',filename,' does not exist') fits = pyfits.open(filename) # Parse the FITS recarray and assign ModelGrid parameter, # spectra, and wavelength attributes wavelength = fits[1].data[0][wavename] spectra = fits[1].data[fluxname] # (nmod,nwave) # Need to loop over pars and restruct to get the desired # order. Really? Not really. order is unimportant pars, partype = [], [] for pname in parnames: pars.append(np.squeeze(fits[1].data[pname])) partype.append(fits[1].data[pname].dtype) # fix # list ->(nmod, npar) -> Structured array pars = self.structure_array(np.array(pars).transpose(), parnames, types=partype) fits.close() return wavelength, spectra, pars
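# Editor's note -- a hedged, self-contained sketch of the interpolation weights
# described above, using weights_1DLinear on synthetic numbers (not from any real
# model library). The method only uses its arguments, so an empty ModelLibrary
# instance suffices.
if __name__ == "__main__":
    grid = np.array([0.0, 1.0, 2.0, 3.0])   # model parameter coordinates
    targets = np.array([0.25, 2.5])         # points to interpolate to
    lib = ModelLibrary()
    inds, wts = lib.weights_1DLinear(grid, targets)
    # Each target gets its two bracketing models, with weights summing to 1,
    # e.g. 0.25 -> models (0, 1) with weights (0.75, 0.25).
    print(inds)
    print(wts)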
""" test_remote.py ~~~~~~~~~~~~ clear; python -m unittest discover -v """ import asyncore import boto.ec2 import logging import random import socket import pprint import sys import time import unittest import PynamoDB.util as util from PynamoDB.client import PynamoClient from scripts import fabfile logging.basicConfig(filename='./logs/pynamo.log', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ) class TestRemoteCommands(unittest.TestCase): def setUp(self): self.external_port = 50000 self.internal_port = 50001 self.num_replicas = 3 self.clients = [] self.region="us-west-1" self.conn = boto.ec2.connect_to_region(self.region) self.instances = [instance for reservation in self.conn.get_all_reservations() for instance in reservation.instances] self.public_dns_names = [instance.public_dns_name for instance in self.instances] def tearDown(self): for client in self.clients: try: client._immediate_shutdown() except: pass def run_client(self, n): for _ in xrange(n): asyncore.loop(timeout=0.001, count=1) def _test_put_single(self): hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(300) key = util.get_hash(str(random.random())) value = util.get_hash(key) client.put(key, value) self.run_client(300) for reply in client.replies: self.assertEqual(reply['error_code'], '\x00') client._immediate_shutdown() def _test_put_1000(self): """ Puts 1000 keys.""" n=1000 for public_dns_name in self.public_dns_names: client=PynamoClient(public_dns_name, int(self.external_port)) for _ in xrange(n/len(self.public_dns_names)): key = util.get_hash(str(random.random())) value = util.get_hash(key) client.put(key, value) self.run_client(10) self.run_client(1000) self.clients.append(client) for client in self.clients: self.assertEqual(len(self.client.replies), n/len(self.public_dns_names)) for reply in client.replies: self.assertEqual(reply['error_code'], '\x00') def _test_get_single(self): """ Puts, then gets key.""" hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(300) key = util.get_hash(str(random.random())) value = util.get_hash(key) client.put(key, value) self.run_client(300) for reply in client.replies: self.assertEqual(reply['error_code'], '\x00') client._immediate_shutdown() hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(300) client.get(key) self.run_client(300) for reply in client.replies: self.assertEqual(reply['error_code'], '\x00') self.assertEqual(reply['value'], value) client._immediate_shutdown() def _test_get_1000(self): """ Puts, then gets n keys. 
----- Ran 1 test in 136.385s """ n = 1000 previously_put_key_values = {} for public_dns_name in self.public_dns_names: client=PynamoClient(public_dns_name, int(self.external_port)) for _ in xrange(n/len(self.public_dns_names)): key = util.get_hash(str(random.random())) value = util.get_hash(key) client.put(key, value) previously_put_key_values[key] = value self.run_client(10) self.run_client(1000) client._immediate_shutdown() time.sleep(10) hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(1000) for key, value in previously_put_key_values.items(): client.get(key) self.run_client(100) self.run_client(1000) num_correct = 0 for i, request in enumerate(client.requests): key = request['key'] value = client.replies[i]['value'] if value == previously_put_key_values[key]: num_correct +=1 else: print key, value, previously_put_key_values[key] print len(client.requests), num_correct self.assertEqual(num_correct, n) client._immediate_shutdown() def _test_put_delete_single(self): """ Puts, then deletes key.""" #put hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(300) key = util.get_hash(str(random.random())) value = util.get_hash(key) client.put(key, value) self.run_client(300) for reply in client.replies: self.assertEqual(reply['error_code'], '\x00') client._immediate_shutdown() #delete hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(300) client.delete(key) self.run_client(300) for reply in client.replies: self.assertEqual(reply['error_code'], '\x00') client._immediate_shutdown() def _test_put_delete_1000(self): """ Puts, then gets n keys. ----- 152.037s """ n = 1000 previously_put_key_values = {} for public_dns_name in self.public_dns_names: client=PynamoClient(public_dns_name, int(self.external_port)) for _ in xrange(n/len(self.public_dns_names)): key = util.get_hash(str(random.random())) value = util.get_hash(key) client.put(key, value) previously_put_key_values[key] = value self.run_client(10) self.run_client(1000) client._immediate_shutdown() time.sleep(10) hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(1000) for key in previously_put_key_values: client.delete(key) self.run_client(100) self.run_client(1000) for reply in client.replies: self.assertEqual(reply['error_code'], '\x00') client._immediate_shutdown() def _test_put_delete_get_single(self): """ Puts, deletes, then tries to get deleted key.""" #put hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(300) key = util.get_hash(str(random.random())) value = util.get_hash(key) client.put(key, value) self.run_client(300) for reply in client.replies: self.assertEqual(reply['error_code'], '\x00') client._immediate_shutdown() #delete hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(300) client.delete(key) self.run_client(300) for reply in client.replies: self.assertEqual(reply['error_code'], '\x00') client._immediate_shutdown() #get hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(300) client.get(key) self.run_client(300) for reply in client.replies: self.assertEqual(reply['error_code'], '\x01') self.assertIsNone(reply['value']) client._immediate_shutdown() def 
_test_put_delete_get_1000(self): n = 1 previously_put_key_values = {} for public_dns_name in self.public_dns_names: client=PynamoClient(public_dns_name, int(self.external_port)) for _ in xrange(n/len(self.public_dns_names)): key = util.get_hash(str(random.random())) value = util.get_hash(key) client.put(key, value) previously_put_key_values[key] = value self.run_client(10) self.run_client(1000) client._immediate_shutdown() time.sleep(10) # deletes hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(1000) for key in previously_put_key_values: client.delete(key) self.run_client(100) self.run_client(1000) for reply in client.replies: self.assertEqual(reply['error_code'], '\x00') client._immediate_shutdown() hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(1000) for key in previously_put_key_values: client.delete(key) self.run_client(100) self.run_client(1000) for reply in client.replies: self.assertEqual(reply['error_code'], '\x01') self.assertIsNone(reply['value']) client._immediate_shutdown() class TestRemoteRollingFailure(unittest.TestCase): def setUp(self): self.external_port = 50000 self.internal_port = 50001 self.clients = [] self.num_replicas = 3 self.region="us-west-1" self.conn = boto.ec2.connect_to_region(self.region) self.instances = [instance for reservation in self.conn.get_all_reservations() for instance in reservation.instances if instance.state in ['running']] self.public_dns_names = [instance.public_dns_name for instance in self.instances] def tearDown(self): for client in self.clients: try: client._immediate_shutdown() except: pass def run_client(self, n): for _ in xrange(n): asyncore.loop(timeout=0.001, count=1) def _test_shutdown(self): hostname=random.choice(self.public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(1000) client.shutdown() self.run_client(1000) for reply in client.replies: self.assertEqual(reply['error_code'], '\x00') client._immediate_shutdown() time.sleep(10) # with self.assertRaises(socket.error) as context: print hostname with self.assertRaises(socket.error) as context: client=PynamoClient(hostname, int(self.external_port)) self.run_client(1000) client.put("key", "value") self.run_client(1000) def _test_rolling_failure_announced_shutdown(self): n = 10 previously_put_key_values = {} for public_dns_name in self.public_dns_names: client=PynamoClient(public_dns_name, int(self.external_port)) for _ in xrange(n/len(self.public_dns_names)): key = util.get_hash(str(random.random())) value = util.get_hash(key) client.put(key, value) previously_put_key_values[key] = value self.run_client(100) self.run_client(1000) client._immediate_shutdown() time.sleep(10) #rolling failures public_dns_names = self.public_dns_names for _ in xrange(len(public_dns_names) - self.num_replicas): # randomly select one node and pop it hostname = random.choice(public_dns_names) public_dns_names.remove(hostname) print hostname # shut down randomly selected node client=PynamoClient(hostname, int(self.external_port)) self.run_client(1000) client.shutdown() self.run_client(1000) client._immediate_shutdown() time.sleep(20) #randomly select other node and check if all values are properly returned hostname = random.choice(public_dns_names) client=PynamoClient(hostname, int(self.external_port)) self.run_client(1000) for key, value in previously_put_key_values.items(): client.get(key) self.run_client(200) self.run_client(500) 
num_correct = 0 for i, request in enumerate(client.requests): key = request['key'] value = client.replies[i]['value'] if value == previously_put_key_values[key]: num_correct +=1 print n, num_correct client._immediate_shutdown() def test_rolling_failure_unannounced_termination(self): n = 100 previously_put_key_values = {} for public_dns_name in self.public_dns_names: client=PynamoClient(public_dns_name, int(self.external_port)) for _ in xrange(n/len(self.public_dns_names)): key = util.get_hash(str(random.random())) value = util.get_hash(key) client.put(key, value) previously_put_key_values[key] = value self.run_client(100) self.run_client(1000) client._immediate_shutdown() time.sleep(10) #rolling failures for _ in xrange(len(self.instances) - self.num_replicas): num_correct = 0 # randomly select one node and pop it instance = random.choice(self.instances) self.instances.remove(instance) instance_dns = instance.public_dns_name instance_id = instance.id print "terminating: {}".format(instance_dns) # shut down randomly selected node instance.reboot() time.sleep(30) #randomly select other node and check if all values are properly returned for key, value in previously_put_key_values.items(): instance_dns = random.choice(self.instances).public_dns_name client=PynamoClient(instance_dns, int(self.external_port)) self.run_client(100) client.get(key) self.run_client(100) time.sleep(5) self.run_client(100) try: if client.replies[0]['value'] == previously_put_key_values[key]: num_correct +=1 except: pass client._immediate_shutdown() print "num_instances, n, num_correct: {}, {}, {}".format(len(self.instances), n, num_correct) def _test(self): pass
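# Editor's note -- a hedged sketch (the helper name is hypothetical, not an
# original test) of the put-then-verify round trip the tests above repeat:
# open a client, pump asyncore, queue a request, pump again, then check the
# reply's error code ('\x00' is treated as success throughout this file).
def _put_and_verify(hostname, external_port, pump=300):
    client = PynamoClient(hostname, int(external_port))
    for _ in xrange(pump):
        asyncore.loop(timeout=0.001, count=1)
    key = util.get_hash(str(random.random()))
    value = util.get_hash(key)
    client.put(key, value)
    for _ in xrange(pump):
        asyncore.loop(timeout=0.001, count=1)
    ok = all(reply['error_code'] == '\x00' for reply in client.replies)
    client._immediate_shutdown()
    return key, value, ok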
#!/usr/bin/env python # coding=UTF-8 # Be warned, in Ubuntu 12.04 the fitting of the lognorm and genextreme distribution doesn't work with the default scipy 0.9.0 package # Either update your scipy installation to version 0.14.0 # or install anaconda (from http://continuum.io/downloads) and prepend to your PATH the anaconda installation folder (usually ~/anaconda/bin) import argparse import ntpath import sys import numpy as np from scipy import stats import matplotlib.pyplot as plt import matplotlib.ticker as tk def str2bool(v): return v.lower() in ("yes", "true", "t", "1") if __name__ == "__main__": ########################################################################## # args parser = argparse.ArgumentParser(description='Fits probability distributions to data coming from a csv file and outputs their graphs') parser.register('type', 'bool', str2bool) parser.add_argument('-i', metavar='INPUT_FILE', type=str, required=True, help='CSV input file') parser.add_argument('-o', metavar='OUTPUT_FILE_NAME', type=str, required=False, default='results', help='Output file name (exports in svg, eps and pdf)') parser.add_argument('-c', metavar='FILE_COLUNM', type=int, required=False, default=0, help='CSV data column to use') parser.add_argument('-t', metavar='GRAPH_TITLE', type=str, required=False, default='Probability distributions', help='Graph title') parser.add_argument('-x', metavar='GRAPH_X_AXIS_LABEL', type=str, required=False, default='Values', help='Graph x axis label') parser.add_argument('-z', metavar='FILE_VALUE_DELIMITER', type=str, required=False, default=',', help='Value delimiter in each line') parser.add_argument('-b', metavar='BIN_WIDTH', type=float, required=False, default=-1, help='Histogram bin width. If < 0, it will use the number of bins specified with -n') parser.add_argument('-n', metavar='NUMBER_OF_BINS', type=float, required=False, default=100, help='Number of bins to use (only used if -b is < 0)') parser.add_argument('-e', metavar='VALUES_SCALE', type=float, required=False, default=1, help='X axis scale') parser.add_argument('-m', metavar='AXIS_MAX_LOCATOR_WIDTH', type=int, required=False, default=-1, help='Width for the major tick locator. If <= 0 lets matplotlib choose one') parser.add_argument('-l', metavar='AXIS_MAX_LOCATOR_LINEAR', type=int, required=False, default=11, help='Divides the input range in -l major ticks. If <= 0 lets matplotlib choose one. Overrides -m') parser.add_argument('-a', metavar='AXIS_MAX_LOCATOR_SUB_SIVISIONS', type=int, required=False, default=10, help='Number of major tick subdivisions for minor locator. If <= 0 lets matplotlib choose one') parser.add_argument('-w', metavar='PLOT_LINE_WIDTH', type=float, required=False, default=0.25, help='Plot line width') parser.add_argument('-g', metavar='GRAPH_STD_DEV_VALUES', type=int, required=False, default=4, help='Show only values that are lower than -g standard deviations (from the mean of the fitted normal distribution). If 0, shows all values. 
Note: this doesnt affect the estimated distribution values (it only trims the displayed values)') parser.add_argument('-r', metavar='DISPLAY_POSITIVE_VALUES_ONLY', type='bool', required=False, default=False, help='Resets the display axis so that only positive values are shown') parser.add_argument('-s', metavar='SAVE_GRAPH', type='bool', required=False, default=True, help='Save graph to files using the name prefix specified with -o') parser.add_argument('-q', metavar='ADD_FILE_EXTENSION_TO_PATH', type='bool', required=False, default=False, help='Prepend to path the extension of the output file') parser.add_argument('-d', metavar='DISPLAY_GRAPH', type='bool', required=False, default=False, help='Show graph') parser.add_argument('-f', metavar='FORMATED_OUTPUT', type='bool', required=False, default=True, help='Console output in readable format or in csv style (if False)') args = parser.parse_args() if args.z == 'sp': args.z = ' ' ########################################################################## # input data = np.loadtxt(args.i, dtype=float, delimiter=args.z, skiprows=1, usecols=(args.c,)) if args.e != 1: data *= args.e ########################################################################## # graph setup fig, ax = plt.subplots(figsize=(19.2, 10.8), dpi=100) bin_width = args.b x_min = np.min(data) x_max = np.max(data) if args.n <= 0: args.n = 100 if args.b <= 0: bin_width = (x_max - x_min) / args.n n, bins = np.histogram(data, args.n, range=(x_min, x_max), density=1) max_bin_count = -sys.maxint max_bin_count_x = -sys.maxint for idx, bin_count in enumerate(n): if bin_count > max_bin_count: max_bin_count = bin_count max_bin_count_x = x_min + (idx * bin_width) + bin_width * 0.5 plt.xlabel(args.x) plt.ylabel('Histogram bin count percentage [0..1] | Probability distribution function value') graph_title = plt.title(args.t, fontsize=16) graph_title.set_y(1.01) plt.minorticks_on() plt.grid(b=True, which='major', color='k', linestyle='--', linewidth=0.30, alpha=0.5) plt.grid(b=True, which='minor', color='k', linestyle=':', linewidth=0.01, alpha=0.2) ########################################################################## # probability distribution fitting output_str = '' output_file_name=ntpath.basename(args.o) if args.f: output_str += '- Distribution fitting for file ' output_str += output_file_name distr_names = ['Normal', 'Log\ Normal', 'Generalized\ Extreme\ Value'] distr_colors = ['-r', '-g', '-b'] normal_ditribution_mean = 0 normal_ditribution_std_dev = 0 for idx, distr in enumerate([stats.norm, stats.lognorm, stats.genextreme]): par_est = distr.fit(data, loc=max_bin_count_x) loc_est = par_est[-2] scale_est = par_est[-1] if idx == 0: normal_ditribution_mean = loc_est normal_ditribution_std_dev = scale_est if args.f: output_str += ('\n - Estimated parameters for %s distribution:' % distr.name) output_str += ('\n -> Location: ' + str(loc_est)) output_str += ('\n -> Scale: ' + str(scale_est)) else: # output_str += (',' + distr.name) output_str += (',' + str(loc_est)) output_str += (',' + str(scale_est)) x_values = np.linspace(x_min, x_max, 100000) y_values = distr.pdf(x_values, *par_est) plot_label = '' if len(par_est) == 2: plot_label = '$\mathrm{%s\ distribution:}\ location=%s,\ scale=%s$' % (distr_names[idx], str(loc_est), str(scale_est)) elif len(par_est) == 3: if args.f: output_str += ('\n -> Shape: ' + str(par_est[0])) else: output_str += (',' + str(par_est[0])) plot_label='$\mathrm{%s\ distribution:}\ location=%s,\ scale=%s,\ shape=%s$' % (distr_names[idx], str(loc_est), 
str(scale_est), str(par_est[0])) ax.plot(x_values, y_values, distr_colors[idx], linewidth=args.w, label=plot_label, alpha=0.75) ########################################################################## # graph plotting if args.g > 0 and abs(normal_ditribution_std_dev) > 0: x_min = np.max([normal_ditribution_mean - normal_ditribution_std_dev * args.g, x_min]) x_max = np.min([normal_ditribution_mean + normal_ditribution_std_dev * args.g, x_max]) if args.b <= 0: bin_width = (x_max - x_min) / args.n if args.b <= 0: x_min_final = x_min x_max_final = x_max number_bins = args.n else: x_min_final = int(x_min // bin_width) x_max_final = int(np.ceil(x_max / bin_width)) number_bins = np.max([x_max_final - x_min_final, 1]) x_min_final *= bin_width x_max_final *= bin_width plt.axis('tight') axlim = list(plt.axis()) axlim[0] = x_min_final axlim[1] = x_max_final if args.r: axlim[0] = np.max([axlim[0], 0.0]) if axlim[0] == axlim[1]: axlim[0] -= 1 axlim[1] += 1 n, bins, patches = plt.hist(data, number_bins, range=(x_min_final, x_max_final), normed=1, histtype='bar', facecolor='grey', linewidth=args.w, alpha=1.0) ax.yaxis.set_major_formatter(tk.FuncFormatter(lambda v, pos: "{:4.2f}".format(v * bin_width) + ' | ' + "{:4.2f}".format(v))) if args.a > 0: minorLocator = tk.AutoMinorLocator(args.a) ax.xaxis.set_minor_locator(minorLocator) if args.m > 0 and args.l <= 0: majorLocator = tk.MultipleLocator(args.m) ax.xaxis.set_major_locator(majorLocator) if args.l > 0: majorLocator = tk.LinearLocator(numticks=args.l) ax.xaxis.set_major_locator(majorLocator) axlim[3] = np.max(n) * 1.15 if axlim[2] == axlim[3]: axlim[2] -= 1 axlim[3] += 1 plt.axis(axlim) graph_legend = plt.legend(fancybox=True) graph_legend.get_frame().set_alpha(0.75) plt.draw() print output_str ########################################################################## # output if args.s: if args.q: output_path = ntpath.dirname(args.o) plt.savefig('%s/svg/%s.svgz' % (output_path, output_file_name), bbox_inches='tight') plt.savefig('%s/eps/%s.eps' % (output_path, output_file_name), bbox_inches='tight') plt.savefig('%s/pdf/%s.pdf' % (output_path, output_file_name), bbox_inches='tight') # plt.savefig('%s/png/%s.png' % (output_path, output_file_name), dpi=300, bbox_inches='tight') else: plt.savefig('%s.svgz' % args.o, bbox_inches='tight') plt.savefig('%s.eps' % args.o, bbox_inches='tight') plt.savefig('%s.pdf' % args.o, bbox_inches='tight') # plt.savefig('%s.png' % args.o, dpi=300, bbox_inches='tight') if args.d: plt.show() exit(0)
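# Editor's note -- a hedged, standalone sketch of the core fitting step performed
# in the loop above, on synthetic data (not part of the original script). Each
# scipy.stats distribution's fit() returns its shape parameter(s) followed by loc
# and scale, and pdf() evaluated with those parameters gives the curve plotted
# against the normalized histogram.
def _fit_example():
    import numpy as np
    from scipy import stats
    data = np.random.lognormal(mean=0.0, sigma=0.5, size=10000)
    params = stats.lognorm.fit(data)              # (shape, loc, scale)
    x = np.linspace(data.min(), data.max(), 200)
    return params, stats.lognorm.pdf(x, *params)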
from nose.tools import with_setup from testconfig import config from pyvcloud import vcloudair from pyvcloud.vcloudair import VCA from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import NatRuleType, GatewayNatRuleType, ReferenceType, NatServiceType, FirewallRuleType, ProtocolsType class TestVCloud: def __init__(self): self.vca = None self.login_to_vcloud() def login_to_vcloud(self): """Login to vCloud""" username = config['vcloud']['username'] password = config['vcloud']['password'] service_type = config['vcloud']['service_type'] host = config['vcloud']['host'] version = config['vcloud']['version'] org = config['vcloud']['org'] service = config['vcloud']['service'] instance = config['vcloud']['instance'] self.vca = VCA(host=host, username=username, service_type=service_type, version=version, verify=True, log=True) assert self.vca if VCA.VCA_SERVICE_TYPE_STANDALONE == service_type: result = self.vca.login(password=password, org=org) assert result result = self.vca.login(token=self.vca.token, org=org, org_url=self.vca.vcloud_session.org_url) assert result elif VCA.VCA_SERVICE_TYPE_VCHS == service_type: result = self.vca.login(password=password) assert result result = self.vca.login(token=self.vca.token) assert result result = self.vca.login_to_org(service, org) assert result elif VCA.VCA_SERVICE_TYPE_VCA == service_type: result = self.vca.login(password=password) assert result result = self.vca.login_to_instance(password=password, instance=instance, token=None, org_url=None) assert result result = self.vca.login_to_instance(password=None, instance=instance, token=self.vca.vcloud_session.token, org_url=self.vca.vcloud_session.org_url) assert result def logout_from_vcloud(self): """Logout from vCloud""" print 'logout' self.vca.logout() self.vca = None assert self.vca is None def test_0001(self): """Loggin in to vCloud""" assert self.vca.token def test_0002(self): """Get VDC""" vdc_name = config['vcloud']['vdc'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name def test_0003(self): """Create vApp""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] catalog = config['vcloud']['catalog'] template = config['vcloud']['template'] network = config['vcloud']['network'] mode = config['vcloud']['mode'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name task = self.vca.create_vapp(vdc_name, vapp_name, template, catalog, vm_name=vm_name) assert task result = self.vca.block_until_completed(task) assert result the_vdc = self.vca.get_vdc(vdc_name) the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp assert the_vapp.name == vapp_name def test_0004(self): """Disconnect vApp from pre-defined networks""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp assert the_vapp.name == vapp_name task = the_vapp.disconnect_from_networks() assert task result = self.vca.block_until_completed(task) assert result def test_0005(self): """Connect vApp to network""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] network = config['vcloud']['network'] mode = config['vcloud']['mode'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name nets = filter(lambda n: n.name == network, self.vca.get_networks(vdc_name)) 
assert len(nets) == 1 the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp assert the_vapp.name == vapp_name task = the_vapp.connect_to_network(nets[0].name, nets[0].href) result = self.vca.block_until_completed(task) assert result def test_0006(self): """Connect VM to network""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] network = config['vcloud']['network'] mode = config['vcloud']['mode'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name nets = filter(lambda n: n.name == network, self.vca.get_networks(vdc_name)) assert len(nets) == 1 the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp assert the_vapp.name == vapp_name task = the_vapp.connect_vms(nets[0].name, connection_index=0, ip_allocation_mode=mode.upper()) result = self.vca.block_until_completed(task) assert result def test_0007(self): """Change vApp/VM Memory""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] memory = config['vcloud']['memory'] memory_new = config['vcloud']['memory_new'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp assert the_vapp.name == vapp_name details = the_vapp.get_vms_details() assert details[0].get('memory_mb') == memory task = the_vapp.modify_vm_memory(vm_name, memory_new) assert task result = self.vca.block_until_completed(task) assert result the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp assert the_vapp.name == vapp_name details = the_vapp.get_vms_details() assert details[0].get('memory_mb') == memory_new def test_0008(self): """Change vApp/VM CPU""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] cpus = config['vcloud']['cpus'] cpus_new = config['vcloud']['cpus_new'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp assert the_vapp.name == vapp_name details = the_vapp.get_vms_details() assert details[0].get('cpus') == cpus task = the_vapp.modify_vm_cpu(vm_name, cpus_new) assert task result = self.vca.block_until_completed(task) assert result the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp assert the_vapp.name == vapp_name details = the_vapp.get_vms_details() assert details[0].get('cpus') == cpus_new def test_0009(self): """Add NAT rule""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] gateway_name = config['vcloud']['gateway'] rule_type = config['vcloud']['nat_rule_type'] original_ip = config['vcloud']['nat_original_ip'] original_port = config['vcloud']['nat_original_port'] translated_ip = config['vcloud']['nat_translated_ip'] translated_port = config['vcloud']['nat_translated_port'] protocol = config['vcloud']['nat_protocol'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name the_gateway = self.vca.get_gateway(vdc_name, gateway_name) assert the_gateway assert the_gateway.get_name() == gateway_name the_gateway.add_nat_rule(rule_type, original_ip, original_port, translated_ip, translated_port, protocol) task = the_gateway.save_services_configuration() assert task result = self.vca.block_until_completed(task) assert result def test_0010(self): """Get NAT rule""" vdc_name = config['vcloud']['vdc'] vapp_name = 
config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] gateway_name = config['vcloud']['gateway'] rule_type = config['vcloud']['nat_rule_type'] original_ip = config['vcloud']['nat_original_ip'] original_port = str(config['vcloud']['nat_original_port']) translated_ip = config['vcloud']['nat_translated_ip'] translated_port = str(config['vcloud']['nat_translated_port']) protocol = config['vcloud']['nat_protocol'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name the_gateway = self.vca.get_gateway(vdc_name, gateway_name) assert the_gateway assert the_gateway.get_name() == gateway_name nat_rules = the_gateway.get_nat_rules() found_rule = False for natRule in nat_rules: ruleId = natRule.get_Id() if rule_type == natRule.get_RuleType(): gatewayNatRule = natRule.get_GatewayNatRule() gateway_original_ip = gatewayNatRule.get_OriginalIp() if gatewayNatRule.get_OriginalIp() else 'any' gateway_original_port = gatewayNatRule.get_OriginalPort() if gatewayNatRule.get_OriginalPort() else 'any' gateway_translated_ip = gatewayNatRule.get_TranslatedIp() if gatewayNatRule.get_TranslatedIp() else 'any' gateway_translated_port = gatewayNatRule.get_TranslatedPort() if gatewayNatRule.get_TranslatedPort() else 'any' gateway_protocol = gatewayNatRule.get_Protocol() if gatewayNatRule.get_Protocol() else 'any' if original_ip == gateway_original_ip and \ original_port == gateway_original_port and \ translated_ip == gateway_translated_ip and \ translated_port == gateway_translated_port and \ protocol == gateway_protocol: found_rule = True assert found_rule def test_0011(self): """Delete NAT rule""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] gateway_name = config['vcloud']['gateway'] rule_type = config['vcloud']['nat_rule_type'] original_ip = config['vcloud']['nat_original_ip'] original_port = str(config['vcloud']['nat_original_port']) translated_ip = config['vcloud']['nat_translated_ip'] translated_port = str(config['vcloud']['nat_translated_port']) protocol = config['vcloud']['nat_protocol'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp assert the_vapp.name == vapp_name details = the_vapp.get_vms_details() the_gateway = self.vca.get_gateway(vdc_name, gateway_name) assert the_gateway assert the_gateway.get_name() == gateway_name found_rule = the_gateway.del_nat_rule(rule_type, original_ip, original_port, translated_ip, translated_port, protocol) assert found_rule task = the_gateway.save_services_configuration() assert task result = self.vca.block_until_completed(task) assert result the_gateway = self.vca.get_gateway(vdc_name, gateway_name) assert the_gateway assert the_gateway.get_name() == gateway_name nat_rules = the_gateway.get_nat_rules() found_rule = False for natRule in nat_rules: ruleId = natRule.get_Id() if rule_type == natRule.get_RuleType(): gatewayNatRule = natRule.get_GatewayNatRule() gateway_original_ip = gatewayNatRule.get_OriginalIp() if gatewayNatRule.get_OriginalIp() else 'any' gateway_original_port = gatewayNatRule.get_OriginalPort() if gatewayNatRule.get_OriginalPort() else 'any' gateway_translated_ip = gatewayNatRule.get_TranslatedIp() if gatewayNatRule.get_TranslatedIp() else 'any' gateway_translated_port = gatewayNatRule.get_TranslatedPort() if gatewayNatRule.get_TranslatedPort() else 'any' gateway_protocol = gatewayNatRule.get_Protocol() if gatewayNatRule.get_Protocol() else 'any' if 
original_ip == gateway_original_ip and \ original_port == gateway_original_port and \ translated_ip == gateway_translated_ip and \ translated_port == gateway_translated_port and \ protocol == gateway_protocol: found_rule = True break assert found_rule == False def test_0012(self): """Enable Firewall service""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] gateway_name = config['vcloud']['gateway'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name the_gateway = self.vca.get_gateway(vdc_name, gateway_name) assert the_gateway assert the_gateway.get_name() == gateway_name the_gateway.enable_fw(True) task = the_gateway.save_services_configuration() assert task result = self.vca.block_until_completed(task) assert result the_gateway = self.vca.get_gateway(vdc_name, gateway_name) assert the_gateway assert the_gateway.get_name() == gateway_name assert the_gateway.is_fw_enabled() def test_0013(self): """Add Firewall rule""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] gateway_name = config['vcloud']['gateway'] is_enable = config['vcloud']['fw_is_enable'] description = config['vcloud']['fw_description'] policy = config['vcloud']['fw_policy'] dest_ip = config['vcloud']['fw_dest_port'] dest_port = config['vcloud']['fw_dest_ip'] protocol = config['vcloud']['fw_protocol'] source_ip = config['vcloud']['fw_source_ip'] source_port = config['vcloud']['fw_source_port'] enable_logging = config['vcloud']['fw_enable_logging'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name the_gateway = self.vca.get_gateway(vdc_name, gateway_name) assert the_gateway assert the_gateway.get_name() == gateway_name the_gateway.add_fw_rule(is_enable, description, policy, protocol, dest_port, dest_ip, source_port, source_ip, enable_logging) task = the_gateway.save_services_configuration() assert task result = self.vca.block_until_completed(task) assert result def _create_protocols_type(self, protocol): all_protocols = {"Tcp": None, "Udp": None, "Icmp": None, "Any": None} all_protocols[protocol] = True return ProtocolsType(**all_protocols) def test_0014(self): """Get Firewall rule""" def create_protocol_list(protocol): plist = [] plist.append(protocol.get_Tcp()) plist.append(protocol.get_Any()) plist.append(protocol.get_Tcp()) plist.append(protocol.get_Udp()) plist.append(protocol.get_Icmp()) plist.append(protocol.get_Other()) return plist vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] gateway_name = config['vcloud']['gateway'] is_enable = config['vcloud']['fw_is_enable'] description = config['vcloud']['fw_description'] policy = config['vcloud']['fw_policy'] dest_ip = config['vcloud']['fw_dest_port'] dest_port = config['vcloud']['fw_dest_ip'] protocol = config['vcloud']['fw_protocol'] source_ip = config['vcloud']['fw_source_ip'] source_port = config['vcloud']['fw_source_port'] enable_logging = config['vcloud']['fw_enable_logging'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name the_gateway = self.vca.get_gateway(vdc_name, gateway_name) assert the_gateway assert the_gateway.get_name() == gateway_name rules = the_gateway.get_fw_rules() to_find_trait = (create_protocol_list(self._create_protocols_type(protocol)), dest_port, dest_ip, source_port, source_ip) rule_found = False for rule in rules: current_trait = 
(create_protocol_list(rule.get_Protocols()), rule.get_DestinationPortRange(), rule.get_DestinationIp(), rule.get_SourcePortRange(), rule.get_SourceIp()) if current_trait == to_find_trait: rule_found = True break assert rule_found def test_0015(self): """Delete Firewall rule""" def create_protocol_list(protocol): plist = [] plist.append(protocol.get_Tcp()) plist.append(protocol.get_Any()) plist.append(protocol.get_Tcp()) plist.append(protocol.get_Udp()) plist.append(protocol.get_Icmp()) plist.append(protocol.get_Other()) return plist vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] gateway_name = config['vcloud']['gateway'] is_enable = config['vcloud']['fw_is_enable'] description = config['vcloud']['fw_description'] policy = config['vcloud']['fw_policy'] dest_ip = config['vcloud']['fw_dest_port'] dest_port = config['vcloud']['fw_dest_ip'] protocol = config['vcloud']['fw_protocol'] source_ip = config['vcloud']['fw_source_ip'] source_port = config['vcloud']['fw_source_port'] enable_logging = config['vcloud']['fw_enable_logging'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name the_gateway = self.vca.get_gateway(vdc_name, gateway_name) assert the_gateway assert the_gateway.get_name() == gateway_name the_gateway.delete_fw_rule(protocol, dest_port, dest_ip, source_port, source_ip) task = the_gateway.save_services_configuration() assert task result = self.vca.block_until_completed(task) assert result the_gateway = self.vca.get_gateway(vdc_name, gateway_name) assert the_gateway assert the_gateway.get_name() == gateway_name rules = the_gateway.get_fw_rules() to_find_trait = (create_protocol_list(self._create_protocols_type(protocol)), dest_port, dest_ip, source_port, source_ip) rule_found = False for rule in rules: current_trait = (create_protocol_list(rule.get_Protocols()), rule.get_DestinationPortRange(), rule.get_DestinationIp(), rule.get_SourcePortRange(), rule.get_SourceIp()) if current_trait == to_find_trait: rule_found = True break assert rule_found == False def test_0020(self): """Power On vApp""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp assert the_vapp.name == vapp_name assert the_vapp.me.get_status() == 8 task = the_vapp.poweron() assert task result = self.vca.block_until_completed(task) assert result the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp != None assert the_vapp.me.get_status() == 4 def test_0022(self): """Power Off vApp""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() == vdc_name the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp assert the_vapp.name == vapp_name assert the_vapp.me.get_status() == 4 task = the_vapp.poweroff() assert task result = self.vca.block_until_completed(task) assert result the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp != None assert the_vapp.me.get_status() == 8 def test_0099(self): """Delete vApp""" vdc_name = config['vcloud']['vdc'] vapp_name = config['vcloud']['vapp'] vm_name = config['vcloud']['vm'] catalog = config['vcloud']['catalog'] template = config['vcloud']['template'] network = config['vcloud']['network'] mode = config['vcloud']['mode'] the_vdc = self.vca.get_vdc(vdc_name) assert the_vdc assert the_vdc.get_name() 
== vdc_name task = self.vca.delete_vapp(vdc_name, vapp_name) assert task result = self.vca.block_until_completed(task) assert result the_vapp = self.vca.get_vapp(the_vdc, vapp_name) assert the_vapp == None
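# --- Hedged usage sketch (not part of the test suite above) -----------------
# A minimal helper showing the vApp power-cycle pattern the tests exercise:
# resolve the vDC and vApp, fire the task, then wait on it with
# block_until_completed().  `vca` is assumed to be the same kind of
# already-logged-in client the tests use as self.vca; the vdc/vapp names are
# placeholders in the style of the config file the tests read.
def power_cycle_vapp(vca, vdc_name, vapp_name):
    the_vdc = vca.get_vdc(vdc_name)
    assert the_vdc is not None
    the_vapp = vca.get_vapp(the_vdc, vapp_name)
    assert the_vapp is not None
    # status 4 == powered on, 8 == powered off (the values the tests assert)
    if the_vapp.me.get_status() == 4:
        task = the_vapp.poweroff()
        assert vca.block_until_completed(task)
        the_vapp = vca.get_vapp(the_vdc, vapp_name)
    task = the_vapp.poweron()
    return vca.block_until_completed(task)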
#import urllib2 import urllib import subprocess import time import os.path import sys import getopt from Bio.PDB import * import openbabel import pybel import yaml from rdkit import Chem from rdkit.Chem import AllChem from rdkit.Chem import Draw import re import os from collections import Counter import numpy as np import collections from math import pi, degrees from operator import itemgetter, attrgetter, methodcaller import getopt import sys import shutil AA = {'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D', 'CYS': 'C', 'GLN': 'Q', 'GLU': 'E', 'GLY': 'G', 'HIS': 'H', 'ILE': 'I', 'LEU': 'L', 'LYS': 'K', 'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S', 'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V'} HBD = {'H', 'K', 'N', 'Q', 'R', 'S', 'T', 'W', 'Y'} HBA = {'D', 'E', 'H', 'N', 'Q', 'S', 'T', 'Y'} NEGATIVE = {'D', 'E'} POSITIVE = {'H', 'K', 'R'} AROMATIC = {'TYR', 'TRP', 'PHE', 'HIS'} CHARGEDAA = {'ARG', 'LYS', 'ASP', 'GLU'} # skip ,'HIS' HYDROPHOBIC_AA = {'A', 'C', 'F', 'I', 'L', 'M', 'P', 'V', 'W', 'Y'} projectdir = '/tmp/interactions/' if not os.path.exists(projectdir): os.makedirs(projectdir) os.chmod(projectdir, 0o777) tempdir = projectdir + 'temp/' if not os.path.exists(tempdir): os.makedirs(tempdir) os.chmod(tempdir, 0o777) ignore_het = ['NA', 'W'] # ignore sodium and water radius = 5 hydrophob_radius = 4.5 ignore_het = ['NA', 'W'] # ignore sodium and water debug = False def fetch_pdb(id): url = 'http://www.rcsb.org/pdb/files/%s.pdb' % id return urllib.urlopen(url).read() def check_unique_ligand_mol(filename): # check that only HETATM are exported to file f_in = open(filename, 'r') tempstr = '' check = [] ligandid = 0 chainid = 0 for line in f_in: if line.startswith('HETATM'): residue_number = line[22:26] chain = line[21] if (residue_number != ligandid and ligandid != 0) or (chain != chainid and chainid != 0): continue ligandid = residue_number chainid = chain tempstr += line f_in.close() f = open(filename, 'w') f.write(tempstr) f.close() def check_pdb(): # check if PDB is there, otherwise fetch if not os.path.exists(projectdir + 'pdbs/'): os.makedirs(projectdir + 'pdbs/') if not os.path.isfile(projectdir + 'pdbs/' + pdbname + '.pdb'): pdbfile = fetch_pdb(pdbname) temp_path = projectdir + 'pdbs/' + pdbname + '.pdb' f = open(temp_path, 'w') f.write(pdbfile) f.close() def checkdirs(): # check that dirs are there and have right permissions directory = projectdir + 'results/' + pdbname if os.path.exists(directory): shutil.rmtree(directory) directory = projectdir + 'results/' + pdbname + '/interaction' if not os.path.exists(directory): os.makedirs(directory) os.chmod(directory, 0o777) directory = projectdir + 'results/' + pdbname + '/ligand' if not os.path.exists(directory): os.makedirs(directory) os.chmod(directory, 0o777) directory = projectdir + 'results/' + pdbname + '/output' if not os.path.exists(directory): os.makedirs(directory) os.chmod(directory, 0o777) directory = projectdir + 'results/' + pdbname + '/png' if not os.path.exists(directory): os.makedirs(directory) os.chmod(directory, 0o777) directory = projectdir + 'results/' + pdbname + '/fragments' if not os.path.exists(directory): os.makedirs(directory) os.chmod(directory, 0o777) def find_ligand_full_names(): pdbfile = projectdir + 'pdbs/' + pdbname + '.pdb' residuename = '' f_in = open(pdbfile, 'r') d = {} for line in f_in: if line.startswith('HETSYN'): # need to fix bad PDB formatting where col4 and col5 are put # together for some reason -- usually seen when the id is +1000 m = re.match("HETSYN[\s]+([\w]{3})[\s]+(.+)", line) 
if (m): d[m.group(1)] = m.group(2).strip() return d def fragment_library(ligand, atomvector, atomname, residuenr, chain, typeinteraction): #if debug: #print "Make fragment pdb file for ligand:", ligand, "atom vector", atomvector, "atomname", atomname, "residuenr from protein", residuenr, typeinteraction, 'chain', chain residuename = 'unknown' ligand_pdb = projectdir + 'results/' + pdbname + \ '/ligand/' + ligand + '_' + pdbname + '.pdb' mol = pybel.readfile("pdb", ligand_pdb).next() mol.removeh() listofvectors = [] chain = chain.strip() if atomvector is not None: for atom in mol: distance = (Vector(getattr(atom, 'coords')) - atomvector).norm() if distance > 0.1: continue # print "Parent:",getattr(atom,'type'),getattr(atom,'idx') # ,Vector(getattr(atom,'coords')) listofvectors.append(Vector(getattr(atom, 'coords'))) for neighbour_atom in openbabel.OBAtomAtomIter(atom.OBAtom): # print neighbour_atom.GetAtomicNum() neighbor = pybel.Atom(neighbour_atom) # print # "Neighbour:",neighbour_atom.GetType(),Vector(getattr(neighbor,'coords')) listofvectors.append(Vector(getattr(neighbor, 'coords'))) for neighbour_atom2 in openbabel.OBAtomAtomIter(neighbour_atom): # print neighbour_atom.GetAtomicNum() neighbor2 = pybel.Atom(neighbour_atom2) # print # "Neighbour2:",neighbour_atom2.GetType(),Vector(getattr(neighbor2,'coords')) listofvectors.append(Vector(getattr(neighbor2, 'coords'))) #if debug: #print "vectors:", listofvectors pdbfile = projectdir + 'pdbs/' + pdbname + '.pdb' f_in = open(pdbfile, 'r') tempstr = '' for line in f_in: if line.startswith('HETATM'): atomvector = Vector(line[30:38], line[38:46], line[46:54]) residue_number = line[22:26] tempchain = line[21] skip = 1 for targetvector in listofvectors: distance = (targetvector - atomvector).norm() if distance < 0.1: # print "FOUND!" skip = 0 if skip == 1: continue elif line.startswith('ATOM'): residue_number = line[22:26].strip() tempchain = line[21].strip() if residue_number != residuenr: continue if tempchain != chain: continue residuenr = residue_number chain = tempchain residuename = line[17:20].strip() else: continue # ignore all other lines tempstr += line filename = projectdir + 'results/' + pdbname + '/fragments/' + pdbname + "_" + ligand + \ "_" + residuename + residuenr + chain + "_" + \ atomname + "_" + typeinteraction + ".pdb" # if debug: # print filename f_in.close() f = open(filename, 'w') f.write(tempstr) f.close() mol = pybel.readfile("pdb", filename).next() mol.write("pdb", filename, overwrite=True) return filename def fragment_library_aromatic(ligand, atomvectors, residuenr, chain, ringnr): # print "Make aromatic fragment pdb file for ligand:",ligand,"atom # vectors",atomvectors,"residuenr from protein", residuenr chain = chain.strip() pdbfile = projectdir + 'pdbs/' + pdbname + '.pdb' residuename = '' f_in = open(pdbfile, 'r') tempstr = '' for line in f_in: if line.startswith('HETATM'): atomvector = Vector(line[30:38], line[38:46], line[46:54]) skip = 1 for targetvector in atomvectors: distance = (targetvector - atomvector).norm() if distance < 0.1: # print "FOUND!" 
skip = 0 if skip == 1: continue elif line.startswith('ATOM'): residue_number = line[22:26].strip() tempchain = line[21].strip() if residue_number != residuenr: continue if tempchain != chain: continue residuename = line[17:20].strip() chain = tempchain else: continue # ignore all other lines tempstr += line filename = projectdir + 'results/' + pdbname + '/fragments/' + pdbname + "_" + ligand + \ "_" + residuename + str(residuenr) + chain + \ "_aromatic_" + str(ringnr) + ".pdb" # print tempstr f_in.close() f = open(filename, 'w') f.write(tempstr) f.close() return filename def create_ligands_and_poseview(): class HetSelect(Select): def accept_residue(self, residue): if residue.get_resname().strip() == HETNAM: return 1 else: return 0 class ClassSelect(Select): def accept_residue(self, residue): if residue.get_parent().id == peptideligand: return 1 else: return 0 p = PDBParser(QUIET=True) s = p.get_structure(pdbname, projectdir + 'pdbs/' + pdbname + '.pdb') # Disable warnings hetflag_done = {} for model in s: for chain in model: for residue in chain: hetresname = residue.get_resname() # catch residues with hetflag hetflag = residue.get_full_id()[3][0].strip() hetflag = hetflag.replace("H_", "").strip() #hetflag = hetflag.replace("W","") #print(hetflag) if peptideligand and chain.id==peptideligand: hetflag= 'pep' if peptideligand and chain.id!=peptideligand: continue if hetflag and hetflag not in ignore_het: if not hetflag in hetflag_done: hetflag_done[hetflag] = 1 HETNAM = hetflag temp_path = projectdir + 'results/' + pdbname + \ '/ligand/' + HETNAM + '_' + pdbname + '.sdf' ligand_pdb = projectdir + 'results/' + pdbname + \ '/ligand/' + HETNAM + '_' + pdbname + '.pdb' ligand_sdf = projectdir + 'results/' + pdbname + \ '/ligand/' + HETNAM + '_' + pdbname + '.sdf' ligand_inchi = projectdir + 'results/' + pdbname + \ '/ligand/' + HETNAM + '_' + pdbname + '.inchi' ligand_poseview = projectdir + 'results/' + \ pdbname + '/png/' + pdbname + '_' + HETNAM + '.png' ligand_png = projectdir + 'results/' + pdbname + '/png/' + HETNAM + '.png' # if sdf not made, make it #Always make them for now if not os.path.isfile(ligand_pdb) or 1 == 1: io = PDBIO() io.set_structure(s) if peptideligand and chain.id==peptideligand: io.save(ligand_pdb, ClassSelect()) else: io.save(ligand_pdb, HetSelect()) check_unique_ligand_mol(ligand_pdb) if len(list(pybel.readfile("pdb", ligand_pdb))) == 0: continue obConversion = openbabel.OBConversion() obConversion.SetInAndOutFormats("pdb", "inchi") obConversion.SetOptions( "K", obConversion.OUTOPTIONS) mol = openbabel.OBMol() # Open Babel will uncompress automatically obConversion.ReadFile(mol, ligand_pdb) obConversion.WriteFile(mol, ligand_inchi) inchikey = obConversion.WriteString(mol) inchikeys[HETNAM] = inchikey.strip() #smiles[HETNAM] = smile smiles[HETNAM] = pybel.readfile( "pdb", ligand_pdb).next().write("smi").split("\t")[0] mol = pybel.readfile("pdb", ligand_pdb).next() mol.OBMol.AddHydrogens(False, True, 7.4) mol.write("pdb", ligand_pdb, overwrite=True) obConversion = openbabel.OBConversion() obConversion.SetInAndOutFormats("pdb", "sdf") mol = openbabel.OBMol() # Open Babel will uncompress automatically obConversion.ReadFile(mol, ligand_pdb) obConversion.WriteFile(mol, ligand_sdf) # if png of ligand not made, make it if not os.path.isfile(ligand_png): m = Chem.MolFromMolFile(ligand_sdf) # Draw.MolToFile(m,ligand_png) # if interaction png not made, make it #SKIP poseview # stuff if not os.path.isfile(ligand_poseview) and 1 == 2: cmd = "poseview -l " + ligand_sdf + " -p " + 
projectdir + \ "pdbs/" + pdbname + ".pdb -o " + ligand_poseview #print('Running cmd ' + cmd) proc = subprocess.Popen( [cmd], stdout=subprocess.PIPE, shell=True) while proc.poll() is None: time.sleep(1) #(out, err) = proc.communicate() else: # print "Already made # Poseview:",pdbname+"_"+HETNAM+".png" continue # print "Done "+str(len(hetflag_done)) def addresiduestoligand(ligand, pdb, residuelist): temp_path = projectdir + 'pdbs/' + pdb + '.pdb' f_in = open(temp_path, 'r') inserstr = '' check = [] # print filename ligandid = 0 chainid = 0 for line in f_in: if line.startswith('ATOM'): temp = line.split() # need to fix bad PDB formatting where col4 and col5 are put # together for some reason -- usually seen when the id is +1000 m = re.match("(\w)(\d+)", temp[4]) if (m): temp[4] = m.group(1) temp[5] = m.group(2) aaname = temp[3] + temp[5] + temp[4] if aaname in residuelist: # print aaname inserstr += line # print inserstr f_in.close() # ligands/'+hetflag+'_'+pdbname+".pdb") temp_path = projectdir + 'results/' + pdbname + \ '/ligand/' + ligand + '_' + pdb + '.pdb' f_in = open(temp_path, 'r') tempstr = '' inserted = 0 for line in f_in: if line.startswith('ATOM'): temp = line.split() if temp[2] == 'H': continue # skip hydrogen in model if (line.startswith('CONECT') or line.startswith('MASTER') or line.startswith('END')) and inserted == 0: tempstr += inserstr inserted = 1 tempstr += line # print tempstr # print tempstr f_in.close() f = open(projectdir + 'results/' + pdbname + '/interaction/' + pdb + '_' + ligand + '.pdb', 'w') f.write(tempstr) f.close() def get_ring_from_aa(residueid): class AAselect(Select): def accept_residue(self, residue): # print residue.get_full_id()[3][1],residueid if str(residue.get_full_id()[3][1]) == residueid: return 1 else: return 0 ptemp = PDBParser(QUIET=True) # disable warnings stemp = ptemp.get_structure( pdbname, projectdir + 'pdbs/' + pdbname + '.pdb') temp_aa_id = residueid io = PDBIO() io.set_structure(stemp) io.save(projectdir + 'temp/' + residueid + '.pdb', AAselect()) mol = pybel.readfile("pdb", projectdir + 'temp/' + residueid + '.pdb').next() # print hetflag rings = getattr(mol, "OBMol").GetSSSR() ringlist = [] for ring in rings: center = Vector(0.0, 0.0, 0.0) members = ring.Size() if ring.IsAromatic(): atomlist = [] atomnames = [] atomvectors = [] for atom in mol: if ring.IsMember(atom.OBAtom): a_vector = Vector(getattr(atom, 'coords')) center += a_vector atomlist.append(atom.idx) atomvectors.append(a_vector) atomnames.append(getattr(atom, 'type')) center = center / members normal = center - a_vector # vector in plane normal1 = center - atomvectors[0] normal2 = center - atomvectors[2] normal = Vector(np.cross([normal1[0],normal1[1],normal1[2]],[normal2[0],normal2[1],normal2[2]])) ringlist.append([atomlist, center, normal, atomnames, atomvectors]) return ringlist def get_hydrogen_from_aa(residueid): class AAselect(Select): def accept_residue(self, residue): # print residue.get_full_id()[3][1],residueid if str(residue.get_full_id()[3][1]) == residueid: return 1 else: return 0 ptemp = PDBParser(QUIET=True) stemp = ptemp.get_structure( pdbname, projectdir + 'pdbs/' + pdbname + '.pdb') temp_aa_id = residueid io = PDBIO() io.set_structure(stemp) io.save(projectdir + 'temp/' + residueid + '.pdb', AAselect()) mol = pybel.readfile("pdb", projectdir + 'temp/' + residueid + '.pdb').next() mol.OBMol.AddHydrogens(False, True, 7.4) # print hetflag donors = [] for atom in mol: if getattr(atom, 'OBAtom').IsHbondDonor(): chargevector = Vector(getattr(atom, 'coords')) # 
print getattr(atom,'type')," is Donor",chargevector temphatoms = [] for neighbor in pybel.ob.OBAtomAtomIter(atom.OBAtom): neighbor = pybel.Atom(neighbor) if getattr(neighbor, 'type') == "H": # print "neighbor # Atom",getattr(neighbor,'type'),"Coords:",getattr(neighbor,'coords') temphatoms.append(Vector(getattr(neighbor, 'coords'))) donors.append([getattr(atom, 'type'), chargevector, temphatoms,getattr(atom, 'OBAtom').IsHbondAcceptor()]) if getattr(atom, 'OBAtom').IsHbondAcceptor(): chargevector = Vector(getattr(atom, 'coords')) #print getattr(atom, 'type'),chargevector,'acceptor!' return donors def build_ligand_info(): count_atom_ligand = {} p = PDBParser(QUIET=True) s = p.get_structure(pdbname, projectdir + 'pdbs/' + pdbname + '.pdb') for model in s: for chain in model: for residue in chain: hetresname = residue.get_resname() # catch residues with hetflag hetflag = residue.get_full_id()[3][0].strip() hetflag = hetflag.replace("H_", "").strip() #hetflag = hetflag.replace("W","") if peptideligand and chain.id==peptideligand: hetflag= 'pep' if peptideligand and chain.id!=peptideligand: continue if hetflag and hetflag not in ignore_het: # if goodhet!='' and hetflag!=goodhet and # "H_"+goodhet!=hetflag: continue ### Only look at the # ligand that has an image from poseview made for it. if hetflag not in hetlist or (peptideligand and chain.id==peptideligand): if len(list(pybel.readfile("pdb", projectdir + 'results/' + pdbname + '/ligand/' + hetflag + '_' + pdbname + '.pdb'))) == 0: # This ligand has no molecules # print('no info for',hetflag) continue if hetflag not in hetlist: #do not recreate for peptides hetlist[hetflag] = [] ligand_charged[hetflag] = [] ligand_donors[hetflag] = [] ligand_acceptors[hetflag] = [] count_atom_ligand[hetflag] = 0 mol = pybel.readfile( "pdb", projectdir + 'results/' + pdbname + '/ligand/' + hetflag + '_' + pdbname + ".pdb").next() # print "LIGAND",hetflag rings = getattr(mol, "OBMol").GetSSSR() # http://python.zirael.org/e-openbabel4.html ringlist = [] for ring in rings: center = Vector(0.0, 0.0, 0.0) members = ring.Size() if ring.IsAromatic(): # print "Found an aromatic ring" atomlist = [] atomnames = [] vectorlist = [] for atom in mol: if ring.IsMember(atom.OBAtom): # print atom.idx,getattr(atom,'type'), # ring.IsMember( atom.OBAtom) a_vector = Vector( getattr(atom, 'coords')) center += a_vector atomlist.append(atom.idx) vectorlist.append(a_vector) atomnames.append(getattr(atom, 'type')) center = center / members normal = center - a_vector # vector in plane #print center - vectorlist[0],center - vectorlist[2] normal1 = center - vectorlist[0] normal2 = center - vectorlist[2] normal = Vector(np.cross([normal1[0],normal1[1],normal1[2]],[normal2[0],normal2[1],normal2[2]])) ringlist.append( [atomlist, center, normal, atomnames, vectorlist]) ligand_rings[hetflag] = ringlist for atom in mol: #print "Atom",getattr(atom,'type'),"Coords:",getattr(atom,'coords'),"FormalCharge:",getattr(atom,'formalcharge'),"PartialCharge",getattr(atom,'partialcharge') if getattr(atom, 'formalcharge') != 0: chargevector = Vector(getattr(atom, 'coords')) ligand_charged[hetflag].append( [getattr(atom, 'type'), chargevector, getattr(atom, 'formalcharge')]) if getattr(atom, 'OBAtom').IsCarboxylOxygen(): chargevector = Vector(getattr(atom, 'coords')) # print getattr(atom,'type')," is # CarboxylOxygen",chargevector ligand_charged[hetflag].append( [getattr(atom, 'type'), chargevector, -1]) if getattr(atom, 'OBAtom').IsHbondDonor(): chargevector = Vector(getattr(atom, 'coords')) # print 
getattr(atom,'type')," is # Donor",chargevector temphatoms = [] for neighbor in pybel.ob.OBAtomAtomIter(atom.OBAtom): neighbor = pybel.Atom(neighbor) if getattr(neighbor, 'type') == "H": # print "neighbor # Atom",getattr(neighbor,'type'),"Coords:",getattr(neighbor,'coords') temphatoms.append( Vector(getattr(neighbor, 'coords'))) ligand_donors[hetflag].append( [getattr(atom, 'type'), chargevector, temphatoms]) if getattr(atom, 'OBAtom').IsHbondAcceptor(): chargevector = Vector(getattr(atom, 'coords')) # print getattr(atom,'type')," is Acceptor",chargevector ligand_acceptors[hetflag].append([getattr(atom, 'type'), chargevector]) # ligand_charged[hetflag].append([getattr(atom,'type'),chargevector,-1]) # Function to get ligand centers to maybe skip some # residues check = 0 center = Vector(0.0, 0.0, 0.0) if peptideligand and chain.id==peptideligand: if hetflag in ligandcenter: center = ligandcenter[hetflag][2] for atom in residue: het_atom = atom.name atom_vector = atom.get_vector() center += atom_vector hetlist[hetflag].append( [hetresname, het_atom, atom_vector]) if not hetflag in ligand_atoms: # make the ligand_atoms ready ligand_atoms[hetflag] = [] ligand_atoms[hetflag].append( [count_atom_ligand[hetflag], atom_vector, het_atom]) count_atom_ligand[hetflag] += 1 ligandcenter[hetflag] = [center, count_atom_ligand[hetflag]] else: for atom in residue: if check == 0 and hetflag in ligand_atoms: continue # skip when there are many of same ligand het_atom = atom.name check = 1 atom_vector = atom.get_vector() center += atom_vector hetlist[hetflag].append( [hetresname, het_atom, atom_vector]) if not hetflag in ligand_atoms: # make the ligand_atoms ready ligand_atoms[hetflag] = [] ligand_atoms[hetflag].append( [count_atom_ligand[hetflag], atom_vector, het_atom]) count_atom_ligand[hetflag] += 1 center2 = center / count_atom_ligand[hetflag] ligandcenter[hetflag] = [ center2, count_atom_ligand[hetflag],center] def remove_hyd(aa,ligand): templist = [] for res in new_results[ligand]['interactions']: #print res[0],res[2],aa if res[0]==aa and (res[2]=='HYD' or res[2]=='hyd'): continue else: templist.append(res) new_results[ligand]['interactions'] = templist def check_other_aromatic(aa,ligand,info): templist = [] check = True for res in new_results[ligand]['interactions']: #print res[0],res[2],aa if res[0]==aa and res[4]=='aromatic': #if the new aromatic interaction has a center-center distance greater than the old one, keep old. if info['Distance']>res[6]['Distance']: templist.append(res) check = False #Do not add the new one. else: #if not, delete the old one, as the new is better. 
check = True #add the new one continue else: templist.append(res) new_results[ligand]['interactions'] = templist return check # LOOP OVER RECEPTOR AND FIND INTERACTIONS def find_interactions(): global count_calcs, count_skips count_atom = 0 count_skips = 0 count_calcs = 0 p = PDBParser(QUIET=True) s = p.get_structure(pdbname, projectdir + 'pdbs/' + pdbname + '.pdb') for model in s: for chain in model: chainid = chain.get_id() if peptideligand and chainid==peptideligand: continue for residue in chain: aa_resname = residue.get_resname() aa_seqid = str(residue.get_full_id()[3][1]) hetflagtest = str(residue.get_full_id()[3][0]).strip() aaname = aa_resname + aa_seqid + chainid hetflagtest = hetflagtest.replace("H_", "") #hetflagtest = hetflagtest.replace("W","") if hetflagtest: continue # residue is a hetnam if hetflagtest in hetlist: continue # residue is a hetnam # print "Looking at ",aa_resname,aa_seqid,chainid countresidue = count_atom # print aaname # could probably make a check here to see if this residue was # anywhere near the ligand, otherwise skip the check per atom for hetflag, atomlist in hetlist.iteritems(): if not 'CA' in residue: # prevent errors continue ca = residue['CA'].get_vector() if (ca - ligandcenter[hetflag][0]).norm() > ligandcenter[hetflag][1]: # print "skipping" count_skips += 1 continue count_atom = countresidue sum = 0 hydrophobic_count = 0 accesible_check = 0 # if goodhet!='' and hetflag!=goodhet and # "H_"+goodhet!=hetflag: continue ### Only look at the # ligand that has an image from poseview made for it. tempdistance = radius for atom in atomlist: #print(hetflag,atom) hetresname = atom[0] het_atom = atom[1] het_vector = atom[2] hydrophobic_check = 1 aaatomlist = [] for atom in residue: count_atom += 1 aa_vector = atom.get_vector() aa_atom = atom.name aa_atom_type = atom.element aaatomlist.append([count_atom, aa_vector, aa_atom]) d = (het_vector - aa_vector) count_calcs += 1 if d.norm() < radius: if not hetflag in results: results[hetflag] = {} summary_results[hetflag] = {'score': [], 'hbond': [], 'hbondplus': [], 'hbond_confirmed': [], 'aromatic': [],'aromaticff': [], 'ionaromatic': [], 'aromaticion': [], 'aromaticef': [], 'aromaticfe': [], 'hydrophobic': [], 'waals': [], 'accessible':[]} new_results[hetflag] = {'interactions':[]} if not aaname in results[hetflag]: results[hetflag][aaname] = [] if not (het_atom[0] == 'H' or aa_atom[0] == 'H' or aa_atom_type=='H'): #print(aa_atom_type) results[hetflag][aaname].append([het_atom, aa_atom, round( d.norm(), 2), het_vector, aa_vector, aa_seqid, chainid]) tempdistance = round(d.norm(), 2) sum += 1 # if both are carbon then we are making a hydrophic # interaction if het_atom[0] == 'C' and aa_atom[0] == 'C' and d.norm() < hydrophob_radius and hydrophobic_check: hydrophobic_count += 1 hydrophobic_check = 0 if d.norm() < 5 and (aa_atom!='C' and aa_atom!='O' and aa_atom!='N'): #print(aa_atom) accesible_check = 1 if accesible_check: #if accessible! 
summary_results[hetflag]['accessible'].append( [aaname]) fragment_file = fragment_library(hetflag, None, '', aa_seqid, chainid, 'access') new_results[hetflag]['interactions'].append([aaname,fragment_file,'acc','accessible','hidden','']) if hydrophobic_count > 2 and AA[aaname[0:3]] in HYDROPHOBIC_AA: # min 3 c-c interactions summary_results[hetflag]['hydrophobic'].append( [aaname, hydrophobic_count]) fragment_file = fragment_library(hetflag, None, '', aa_seqid, chainid, 'hydrop') new_results[hetflag]['interactions'].append([aaname,fragment_file,'hyd','hydrophobic','hydrophobic','']) if sum > 1 and aa_resname in AROMATIC: # if debug: # , get_ring_atoms(aaatomlist) # print "Need to analyse aromatic ring in ", aaname aarings = get_ring_from_aa(aa_seqid) if not aarings: # print "Could not find aromatic ring in",aaname continue #print "amount of rings in AA",len(aarings) for aaring in aarings: #aaring = aaring[0] # res_ring center = aaring[1] count = 0 #print "AARING",aaring for ring in ligand_rings[hetflag]: # print ring shortest_center_het_ring_to_res_atom = 10 shortest_center_aa_ring_to_het_atom = 10 # print aaring[4] # print ring[4] for a in aaring[4]: if (ring[1] - a).norm() < shortest_center_het_ring_to_res_atom: shortest_center_het_ring_to_res_atom = (ring[1] - a).norm() for a in ring[4]: if (center - a).norm() < shortest_center_aa_ring_to_het_atom: shortest_center_aa_ring_to_het_atom = (center - a).norm() count += 1 # take vector from two centers, and compare against # vector from center to outer point -- this will # give the perpendicular angel. angle = Vector.angle(center - ring[1], ring[2]) #aacenter to ring center vs ring normal # take vector from two centers, and compare against # vector from center to outer point -- this will # give the perpendicular angel. 
angle2 = Vector.angle(center - ring[1], aaring[2]) #aacenter to ring center vs AA normal angle3 = Vector.angle(ring[2], aaring[2]) #two normal vectors against eachother #print "angleaa",aaring[2],"anglelig",ring[2] angle_degrees = [ round(degrees(angle), 1), round(degrees(angle2), 1), round(degrees(angle3), 1)] distance = (center - ring[1]).norm() #if debug: #print aaname,"Ring #", count, "Distance:", round(distance, 2), "Angle:", angle_degrees, 'Shortest res->ligcenter', shortest_center_het_ring_to_res_atom, 'Shortest lig->rescenter', shortest_center_aa_ring_to_het_atom if distance < 5 and (angle_degrees[2]<20 or abs(angle_degrees[2]-180)<20): # poseview uses <5 # print "Ring # #",count,"Distance:",round(distance,2), # "Angle:",round(angle_degrees,2) summary_results[hetflag]['aromatic'].append( [aaname, count, round(distance, 2), angle_degrees]) fragment_file = fragment_library_aromatic( hetflag, ring[4], aa_seqid, chainid, count) if debug: print aaname,"F2F Ring #", count, "Distance:", round(distance, 2), "Angle:", angle_degrees, 'Shortest res->ligcenter', round(shortest_center_het_ring_to_res_atom,2), 'Shortest lig->rescenter', round(shortest_center_aa_ring_to_het_atom,2) if check_other_aromatic(aaname,hetflag,{'Distance':round(distance, 2),'Angles':angle_degrees}): new_results[hetflag]['interactions'].append([aaname,fragment_file,'aro_ff','aromatic (face-to-face)','aromatic','none',{'Distance':round(distance, 2),'ResAtom to center':round(shortest_center_het_ring_to_res_atom,2),'LigAtom to center': round(shortest_center_aa_ring_to_het_atom,2),'Angles':angle_degrees}]) remove_hyd(aaname,hetflag) # need to be careful for edge-edge elif (shortest_center_aa_ring_to_het_atom < 4.5) and abs(angle_degrees[0]-90)<30 and abs(angle_degrees[2]-90)<30: summary_results[hetflag]['aromaticfe'].append( [aaname, count, round(distance, 2), angle_degrees]) fragment_file = fragment_library_aromatic( hetflag, ring[4], aa_seqid, chainid, count) if debug: print aaname,"FE Ring #", count, "Distance:", round(distance, 2), "Angle:", angle_degrees, 'Shortest res->ligcenter', round(shortest_center_het_ring_to_res_atom,2), 'Shortest lig->rescenter', round(shortest_center_aa_ring_to_het_atom,2) if check_other_aromatic(aaname,hetflag,{'Distance':round(distance, 2),'Angles':angle_degrees}): new_results[hetflag]['interactions'].append([aaname,fragment_file,'aro_fe_protein','aromatic (face-to-edge)','aromatic','protein',{'Distance':round(distance, 2),'ResAtom to center':round(shortest_center_het_ring_to_res_atom,2),'LigAtom to center': round(shortest_center_aa_ring_to_het_atom,2),'Angles':angle_degrees}]) remove_hyd(aaname,hetflag) # need to be careful for edge-edge elif (shortest_center_het_ring_to_res_atom < 4.5) and abs(angle_degrees[1]-90)<30 and abs(angle_degrees[2]-90)<30: summary_results[hetflag]['aromaticef'].append( [aaname, count, round(distance, 2), angle_degrees]) fragment_file = fragment_library_aromatic( hetflag, ring[4], aa_seqid, chainid, count) if debug: print aaname,"EF Ring #", count, "Distance:", round(distance, 2), "Angle:", angle_degrees, 'Shortest res->ligcenter', round(shortest_center_het_ring_to_res_atom,2), 'Shortest lig->rescenter', round(shortest_center_aa_ring_to_het_atom,2) if check_other_aromatic(aaname,hetflag,{'Distance':round(distance, 2),'Angles':angle_degrees}): new_results[hetflag]['interactions'].append([aaname,fragment_file,'aro_ef_protein','aromatic (edge-to-face)','aromatic','protein',{'Distance':round(distance, 2),'ResAtom to 
center':round(shortest_center_het_ring_to_res_atom,2),'LigAtom to center': round(shortest_center_aa_ring_to_het_atom,2),'Angles':angle_degrees}]) remove_hyd(aaname,hetflag) for charged in ligand_charged[hetflag]: distance = (center - charged[1]).norm() # needs max 4.2 distance to make aromatic+ if distance < 4.2 and charged[2] > 0: if debug: print "Ring #", count, "Distance:", round(distance, 2), "Angle:", round(angle_degrees, 2) summary_results[hetflag]['aromaticion'].append( [aaname, count, round(distance, 2), charged]) #FIXME fragment file new_results[hetflag]['interactions'].append([aaname,'','aro_ion_protein','aromatic (pi-cation)','aromatic','protein',{'Distance':round(distance, 2)}]) remove_hyd(aaname,hetflag) if sum > 2 and aa_resname in CHARGEDAA and ligand_rings[hetflag]: # print "check for charged AA to aromatic # rings!",aa_resname,hetflag for atom in residue: aa_vector = atom.get_vector() aa_atom = atom.name for ring in ligand_rings[hetflag]: d = (ring[2] - aa_vector).norm() # if d<10: print # "aa_atom",aa_atom,aaname,"distance to a # ring",d,hetflag,aa_resname def analyze_interactions(): for ligand, result in results.iteritems(): # print "AA close to ligands ("+ligand+"): ",list(result.keys()) # print "Results for"+ligand sortedresults = [] ligscore = 0 for residue, interaction in result.iteritems(): sum = 0 score = 0 hbond = [] hbondplus = [] type = 'waals' for entry in interaction: hbondconfirmed = [] if entry[2] <= 3.5: # print(entry) # if debug: # print "Likely H-Bond", entry if entry[0][0] == 'C' or entry[1][0] == 'C': continue # If either atom is C then no hydrogen bonding # if entry[1] == 'N': #if residue atom is N, then it is backbone! # print('backbone interaction!') aa_donors = get_hydrogen_from_aa(entry[5]) hydrogenmatch = 0 res_is_acceptor = False res_is_donor = False for donor in aa_donors: d = (donor[1] - entry[4]).norm() if d < 0.5: #print 'found donor in residue',residue,entry,donor hydrogens = donor[2] res_is_acceptor = donor[3] res_is_donor = True for hydrogen in hydrogens: hydrogenvector = hydrogen - donor[1] bindingvector = entry[3] - hydrogen angle = round(degrees(Vector.angle( hydrogenvector, bindingvector)), 2) distance = round(bindingvector.norm(), 2) # print "RESDONOR",residue,"From # ligand",entry[0],"To # AA",entry[1],"HydrogenCheck # angle",angle,"Distance from hydrogen to # acceptor",distance if distance > 2.5: # print "Too far away" continue if angle > 60: # print "Bad angle" continue hydrogenmatch = 1 hbondconfirmed.append( ["D", entry[0], entry[1], angle, distance]) # print "aadonors:",aa_donors found_donor = 0 for donor in ligand_donors[ligand]: d = (donor[1] - entry[3]).norm() # print charged,d,residue,entry if d < 0.5: found_donor = 1 hydrogens = donor[2] for hydrogen in hydrogens: hydrogenvector = hydrogen - donor[1] bindingvector = entry[4] - hydrogen angle = round(degrees(Vector.angle( hydrogenvector, bindingvector)), 2) distance = round(bindingvector.norm(), 2) # print "LIGDONOR",residue,"From # ligand",entry[0],"To # AA",entry[1],"HydrogenCheck # angle",angle,"Distance from hydrogen to # acceptor",distance if distance > 2.5: # print "Too far away" continue if angle > 60: # print "Bad angle" continue hydrogenmatch = 1 hbondconfirmed.append( ["A", entry[0], entry[1], angle, distance]) found_acceptor = 0 for acceptor in ligand_acceptors[ligand]: d = (acceptor[1] - entry[3]).norm() # print charged,d,residue,entry if d < 0.5: found_acceptor = 1 if found_donor==0 and res_is_donor: hydrogenmatch = 1 hbondconfirmed.append(['D']) #set residue as 
donor #print 'found acceptor which is not donor',residue,entry[0],acceptor if not found_acceptor and found_donor and res_is_acceptor: hydrogenmatch = 1 hbondconfirmed.append(['A']) #set residue as acceptor #print 'donor which is not acceptor',residue,entry[0] if found_acceptor and found_donor: if res_is_donor and not res_is_acceptor: hydrogenmatch = 1 hbondconfirmed.append(['D']) elif not res_is_donor and res_is_acceptor: hydrogenmatch = 1 hbondconfirmed.append(['A']) else: pass #print 'can be both donor and acceptor' chargedcheck = 0 charge_value = 0 res_charge_value = 0 doublechargecheck = 0 for charged in ligand_charged[ligand]: d = (charged[1] - entry[3]).norm() if d < 0.5: # print 'found charge',residue,d,entry chargedcheck = 1 hydrogenmatch = 0 # Replace previous match! charge_value = charged[2] if residue[0:3] in CHARGEDAA: # print "check for hbondplus!",residue,entry # Need to check which atoms, but for now assume charged if chargedcheck: doublechargecheck = 1 chargedcheck = 1 hydrogenmatch = 0 # Replace previous match! if AA[residue[0:3]] in POSITIVE: res_charge_value = 1 elif AA[residue[0:3]] in NEGATIVE: res_charge_value = -1 if entry[1] == 'N': #backbone connection! fragment_file = fragment_library(ligand, entry[3], entry[ 0], entry[5], entry[6], 'HB_backbone') new_results[ligand]['interactions'].append([residue,fragment_file,'polar_backbone','polar (hydrogen bond with backbone)','polar','protein',entry[0],entry[1],entry[2]]) remove_hyd(residue,ligand) elif entry[1] == 'O': #backbone connection! fragment_file = fragment_library(ligand, entry[3], entry[ 0], entry[5], entry[6], 'HB_backbone') new_results[ligand]['interactions'].append([residue,fragment_file,'polar_backbone','polar (hydrogen bond with backbone)','polar','protein',entry[0],entry[1],entry[2]]) remove_hyd(residue,ligand) elif hydrogenmatch: found = 0 fragment_file = fragment_library(ligand, entry[3], entry[ 0], entry[5], entry[6], 'HB') for x in summary_results[ligand]['hbond_confirmed']: if residue == x[0]: # print "Already key there",residue key = summary_results[ligand][ 'hbond_confirmed'].index(x) summary_results[ligand]['hbond_confirmed'][ key][1].extend(hbondconfirmed) found = 1 if hbondconfirmed[0][0]=="D": new_results[ligand]['interactions'].append([residue,fragment_file,'polar_donor_protein','polar (hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]]) remove_hyd(residue,ligand) if hbondconfirmed[0][0]=="A": new_results[ligand]['interactions'].append([residue,fragment_file,'polar_acceptor_protein','polar (hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]]) remove_hyd(residue,ligand) if found == 0: summary_results[ligand]['hbond_confirmed'].append( [residue, hbondconfirmed]) if chargedcheck: type = 'hbondplus' hbondplus.append(entry) elif chargedcheck: type = 'hbondplus' hbondplus.append(entry) fragment_file = fragment_library(ligand, entry[3], entry[ 0], entry[5], entry[6], 'HBC') remove_hyd(residue,ligand) if doublechargecheck: if (res_charge_value>0): new_results[ligand]['interactions'].append([residue,fragment_file,'polar_double_pos_protein','polar (charge-charge)','polar','',entry[0],entry[1],entry[2]]) elif (res_charge_value<0): new_results[ligand]['interactions'].append([residue,fragment_file,'polar_double_neg_protein','polar (charge-charge)','polar','',entry[0],entry[1],entry[2]]) elif (charge_value>0): new_results[ligand]['interactions'].append([residue,fragment_file,'polar_pos_ligand','polar (charge-assisted hydrogen bond)','polar','ligand',entry[0],entry[1],entry[2]]) elif 
(charge_value<0): new_results[ligand]['interactions'].append([residue,fragment_file,'polar_neg_ligand','polar (charge-assisted hydrogen bond)','polar','ligand',entry[0],entry[1],entry[2]]) else: if (res_charge_value>0): new_results[ligand]['interactions'].append([residue,fragment_file,'polar_pos_protein','polar (charge-assisted hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]]) elif (res_charge_value<0): new_results[ligand]['interactions'].append([residue,fragment_file,'polar_neg_protein','polar (charge-assisted hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]]) else: new_results[ligand]['interactions'].append([residue,fragment_file,'polar_unknown_protein','polar (charge-assisted hydrogen bond)','polar','protein',entry[0],entry[1],entry[2]]) else: type = 'hbond' hbond.append(entry) fragment_file = fragment_library(ligand, entry[3], entry[ 0], entry[5], entry[6], 'HB') new_results[ligand]['interactions'].append([residue,fragment_file,'polar_unspecified','polar (hydrogen bond)','polar','',entry[0],entry[1],entry[2]]) remove_hyd(residue,ligand) #print type,hbondconfirmed entry[3] = '' if (entry[2] < 4.5): sum += 1 score += 4.5 - entry[2] score = round(score, 2) if type == 'waals' and score > 2: # mainly no hbond detected summary_results[ligand]['waals'].append([residue, score, sum]) elif type == 'hbond': summary_results[ligand]['hbond'].append( [residue, score, sum, hbond]) elif type == 'hbondplus': summary_results[ligand]['hbondplus'].append( [residue, score, sum, hbondplus]) # elif type == 'hbond_confirmed': # summary_results[ligand]['hbond_confirmed'].append([residue,score,sum,hbondconfirmed]) ligscore += score # print "Total <4 (score is combined diff from # 4)",sum,"score",score sortedresults.append([residue, score, sum, hbond, type]) summary_results[ligand]['score'].append([ligscore]) summary_results[ligand]['inchikey'] = inchikeys[ligand] summary_results[ligand]['smiles'] = smiles[ligand] new_results[ligand]['score'] = ligscore new_results[ligand]['inchikey'] = inchikeys[ligand] new_results[ligand]['smiles'] = smiles[ligand] if ligand in hetlist_display: summary_results[ligand]['prettyname'] = hetlist_display[ligand] new_results[ligand]['prettyname'] = hetlist_display[ligand] # print ligand,"Ligand score:"+str(ligscore) sortedresults = sorted(sortedresults, key=itemgetter(1), reverse=True) def pretty_results(): for ligand, result in summary_results.iteritems(): output = '' bindingresidues = [] #output += "Results for "+str(ligand)+"\n" for type, typelist in result.iteritems(): if type == 'waals': continue output += type + "\n" if type == 'waals': typelist = sorted(typelist, key=itemgetter(2), reverse=True) if type == 'hydrophobic': typelist = sorted(typelist, key=itemgetter(1), reverse=True) for entry in typelist: if type != 'score': bindingresidues.append(entry[0]) if type == 'hbond': output += '\t'.join(map(str, entry[0:1])) + '\n' for bond in entry[3]: output += '\t'.join(map(str, bond[0:3])) + '\n' elif type == 'hbondplus': output += '\t'.join(map(str, entry[0:1])) + '\n' for bond in entry[3]: output += '\t'.join(map(str, bond[0:3])) + '\n' elif type == 'hbond_confirmed': output += '\t'.join(map(str, entry[0:1])) + '\n' for bond in entry[1]: output += '\t'.join(map(str, bond)) + '\n' else: # print entry output += '\t'.join(map(str, entry)) + '\n' temp_path = projectdir + 'results/' + pdbname + '/output/' + \ pdbname + '_' + ligand.replace("H_", "") + '.yaml' # yaml.dump(result, open(temp_path, 'w')) yaml.dump(new_results[ligand], open(temp_path, 'w')) if 
debug: print ligand,'\n',open(temp_path,'r').read() addresiduestoligand(ligand, pdbname, bindingresidues) def calculate_interactions(pdb, session=None, peptide=None): global pdbname, hetlist, hetlist_display, ligand_atoms, ligand_charged, ligandcenter, ligand_rings, ligand_donors, ligand_acceptors, results, sortedresults, summary_results, inchikeys, smiles, projectdir, new_results, peptideligand hetlist = {} hetlist_display = {} ligand_atoms = {} ligand_charged = {} ligandcenter = {} ligand_rings = {} ligand_donors = {} ligand_acceptors = {} results = {} sortedresults = {} summary_results = {} new_results = {} inchikeys = {} smiles = {} peptideligand = peptide if not session: pdbname = pdb # print "checking normal ",pdbname check_pdb() checkdirs() hetlist_display = find_ligand_full_names() create_ligands_and_poseview() build_ligand_info() find_interactions() analyze_interactions() pretty_results() else: pdbname = pdb projectdir = '/tmp/interactions/' + session + "/" checkdirs() hetlist_display = find_ligand_full_names() create_ligands_and_poseview() build_ligand_info() find_interactions() analyze_interactions() pretty_results() def main(argv): pdbname = '' try: # print 'ARGV :', argv opts, args = getopt.getopt(argv, "p:s:c:", ["pdb"]) except getopt.GetoptError as err: print "Remember PDB name -p " print err sys.exit(2) session = None peptide = None for opt, arg in opts: if opt in ("-p"): pdbname = arg elif opt in ("-s"): session = arg elif opt in ("-c"): peptide = arg if not pdbname: print "Remember PDB name -p " sys.exit(2) if session: calculate_interactions(pdbname, session, peptide=peptide) else: calculate_interactions(pdbname, peptide=peptide) if __name__ == "__main__": main(sys.argv[1:]) #pdbname = '1F88' # calculate_interactions(pdbname)
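# --- Hedged usage sketch (not part of the pipeline above) -------------------
# Driving the pipeline from Python instead of the command line, then reading
# one of the per-ligand YAML summaries written by pretty_results().  The PDB
# id and ligand code below are placeholders; the result path follows the
# pattern used in pretty_results() and assumes the default projectdir
# ('/tmp/interactions/').
def example_run(pdb_id='1F88', ligand='RET'):
    calculate_interactions(pdb_id)
    result_file = (projectdir + 'results/' + pdb_id + '/output/' +
                   pdb_id + '_' + ligand + '.yaml')
    with open(result_file) as f:
        return yaml.load(f)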
# Copyright 2010 Google Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import boto from boto import handler from boto.exception import InvalidAclError from boto.gs.acl import ACL, CannedACLStrings from boto.gs.acl import SupportedPermissions as GSPermissions from boto.gs.key import Key as GSKey from boto.s3.acl import Policy from boto.s3.bucket import Bucket as S3Bucket import xml.sax # constants for default object ACL and standard acl in http query args DEF_OBJ_ACL = 'defaultObjectAcl' STANDARD_ACL = 'acl' class Bucket(S3Bucket): def __init__(self, connection=None, name=None, key_class=GSKey): super(Bucket, self).__init__(connection, name, key_class) def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None): """sets or changes a bucket's acl. We include a version_id argument to support a polymorphic interface for callers, however, version_id is not relevant for Google Cloud Storage buckets and is therefore ignored here.""" if isinstance(acl_or_str, Policy): raise InvalidAclError('Attempt to set S3 Policy on GS ACL') elif isinstance(acl_or_str, ACL): self.set_xml_acl(acl_or_str.to_xml(), key_name, headers=headers) else: self.set_canned_acl(acl_or_str, key_name, headers=headers) def set_def_acl(self, acl_or_str, key_name='', headers=None): """sets or changes a bucket's default object acl""" if isinstance(acl_or_str, Policy): raise InvalidAclError('Attempt to set S3 Policy on GS ACL') elif isinstance(acl_or_str, ACL): self.set_def_xml_acl(acl_or_str.to_xml(), key_name, headers=headers) else: self.set_def_canned_acl(acl_or_str, key_name, headers=headers) def get_acl_helper(self, key_name, headers, query_args): """provides common functionality for get_acl() and get_def_acl()""" response = self.connection.make_request('GET', self.name, key_name, query_args=query_args, headers=headers) body = response.read() if response.status == 200: acl = ACL(self) h = handler.XmlHandler(acl, self) xml.sax.parseString(body, h) return acl else: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def get_acl(self, key_name='', headers=None, version_id=None): """returns a bucket's acl. 
We include a version_id argument to support a polymorphic interface for callers, however, version_id is not relevant for Google Cloud Storage buckets and is therefore ignored here.""" return self.get_acl_helper(key_name, headers, STANDARD_ACL) def get_def_acl(self, key_name='', headers=None): """returns a bucket's default object acl""" return self.get_acl_helper(key_name, headers, DEF_OBJ_ACL) def set_canned_acl_helper(self, acl_str, key_name, headers, query_args): """provides common functionality for set_canned_acl() and set_def_canned_acl()""" assert acl_str in CannedACLStrings if headers: headers[self.connection.provider.acl_header] = acl_str else: headers={self.connection.provider.acl_header: acl_str} response = self.connection.make_request('PUT', self.name, key_name, headers=headers, query_args=query_args) body = response.read() if response.status != 200: raise self.connection.provider.storage_response_error( response.status, response.reason, body) def set_canned_acl(self, acl_str, key_name='', headers=None, version_id=None): """sets or changes a bucket's acl to a predefined (canned) value. We include a version_id argument to support a polymorphic interface for callers, however, version_id is not relevant for Google Cloud Storage buckets and is therefore ignored here.""" return self.set_canned_acl_helper(acl_str, key_name, headers, STANDARD_ACL) def set_def_canned_acl(self, acl_str, key_name='', headers=None): """sets or changes a bucket's default object acl to a predefined (canned) value""" return self.set_canned_acl_helper(acl_str, key_name, headers, query_args=DEF_OBJ_ACL) def set_def_xml_acl(self, acl_str, key_name='', headers=None): """sets or changes a bucket's default object""" return self.set_xml_acl(acl_str, key_name, headers, query_args=DEF_OBJ_ACL) # Method with same signature as boto.s3.bucket.Bucket.add_email_grant(), # to allow polymorphic treatment at application layer. def add_email_grant(self, permission, email_address, recursive=False, headers=None): """ Convenience method that provides a quick way to add an email grant to a bucket. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: (READ, WRITE, FULL_CONTROL). :type email_address: string :param email_address: The email address associated with the GS account your are granting the permission to. :type recursive: boolean :param recursive: A boolean value to controls whether the call will apply the grant to all keys within the bucket or not. The default value is False. By passing a True value, the call will iterate through all keys in the bucket and apply the same grant to each key. CAUTION: If you have a lot of keys, this could take a long time! """ if permission not in GSPermissions: raise self.connection.provider.storage_permissions_error( 'Unknown Permission: %s' % permission) acl = self.get_acl(headers=headers) acl.add_email_grant(permission, email_address) self.set_acl(acl, headers=headers) if recursive: for key in self: key.add_email_grant(permission, email_address, headers=headers) # Method with same signature as boto.s3.bucket.Bucket.add_user_grant(), # to allow polymorphic treatment at application layer. def add_user_grant(self, permission, user_id, recursive=False, headers=None): """ Convenience method that provides a quick way to add a canonical user grant to a bucket. 
This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUTs the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: (READ|WRITE|FULL_CONTROL) :type user_id: string :param user_id: The canonical user id associated with the GS account you are granting the permission to. :type recursive: bool :param recursive: A boolean value to controls whether the call will apply the grant to all keys within the bucket or not. The default value is False. By passing a True value, the call will iterate through all keys in the bucket and apply the same grant to each key. CAUTION: If you have a lot of keys, this could take a long time! """ if permission not in GSPermissions: raise self.connection.provider.storage_permissions_error( 'Unknown Permission: %s' % permission) acl = self.get_acl(headers=headers) acl.add_user_grant(permission, user_id) self.set_acl(acl, headers=headers) if recursive: for key in self: key.add_user_grant(permission, user_id, headers=headers) def add_group_email_grant(self, permission, email_address, recursive=False, headers=None): """ Convenience method that provides a quick way to add an email group grant to a bucket. This method retrieves the current ACL, creates a new grant based on the parameters passed in, adds that grant to the ACL and then PUT's the new ACL back to GS. :type permission: string :param permission: The permission being granted. Should be one of: READ|WRITE|FULL_CONTROL See http://code.google.com/apis/storage/docs/developer-guide.html#authorization for more details on permissions. :type email_address: string :param email_address: The email address associated with the Google Group to which you are granting the permission. :type recursive: bool :param recursive: A boolean value to controls whether the call will apply the grant to all keys within the bucket or not. The default value is False. By passing a True value, the call will iterate through all keys in the bucket and apply the same grant to each key. CAUTION: If you have a lot of keys, this could take a long time! """ if permission not in GSPermissions: raise self.connection.provider.storage_permissions_error( 'Unknown Permission: %s' % permission) acl = self.get_acl(headers=headers) acl.add_group_email_grant(permission, email_address) self.set_acl(acl, headers=headers) if recursive: for key in self: key.add_group_email_grant(permission, email_address, headers=headers) # Method with same input signature as boto.s3.bucket.Bucket.list_grants() # (but returning different object type), to allow polymorphic treatment # at application layer. def list_grants(self, headers=None): acl = self.get_acl(headers=headers) return acl.entries def disable_logging(self, headers=None): xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging/>' self.set_subresource('logging', xml_str, headers=headers) def enable_logging(self, target_bucket, target_prefix=None, headers=None): if isinstance(target_bucket, Bucket): target_bucket = target_bucket.name xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging>' xml_str = (xml_str + '<LogBucket>%s</LogBucket>' % target_bucket) if target_prefix: xml_str = (xml_str + '<LogObjectPrefix>%s</LogObjectPrefix>' % target_prefix) xml_str = xml_str + '</Logging>' self.set_subresource('logging', xml_str, headers=headers)
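# --- Hedged usage sketch (not part of the class above) ----------------------
# Typical flow for the ACL helpers defined in Bucket: fetch a bucket, apply a
# canned ACL, grant an e-mail address READ access, and turn on access
# logging.  The bucket names and e-mail address are placeholders, and
# boto.connect_gs() (imported via the module's `import boto`) needs valid
# Google Cloud Storage credentials configured.
if __name__ == '__main__':
    conn = boto.connect_gs()
    bucket = conn.get_bucket('example-bucket')
    bucket.set_canned_acl('private')
    bucket.add_email_grant('READ', 'someone@example.com')
    bucket.enable_logging('example-log-bucket', target_prefix='access-')
    grants = bucket.list_grants()          # Entries parsed from the ACL XML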
# Copyright (c) 2013-2014 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime from barbican.model import models from barbican.openstack.common import jsonutils as json from barbican.tests import utils class WhenCreatingNewSecret(utils.BaseTestCase): def setUp(self): super(WhenCreatingNewSecret, self).setUp() self.parsed_secret = {'name': 'name', 'algorithm': 'algorithm', 'bit_length': 512, 'mode': 'mode', 'plain_text': 'not-encrypted'} self.parsed_order = {'secret': self.parsed_secret} def test_new_secret_is_created_from_dict(self): date_time = datetime.datetime.now().isoformat() self.parsed_secret['expiration'] = date_time secret = models.Secret(self.parsed_secret) self.assertEqual(secret.name, self.parsed_secret['name']) self.assertEqual(secret.algorithm, self.parsed_secret['algorithm']) self.assertEqual(secret.bit_length, self.parsed_secret['bit_length']) self.assertEqual(secret.mode, self.parsed_secret['mode']) self.assertIsInstance(secret.expiration, datetime.datetime) self.assertEqual(secret.created_at, secret.updated_at) class WhenCreatingNewOrder(utils.BaseTestCase): def setUp(self): super(WhenCreatingNewOrder, self).setUp() self.parsed_order = { 'type': 'certificate', 'meta': { 'email': 'email@email.com' }, 'sub_status': 'Pending', 'sub_status_message': 'Waiting for instructions...' 
} def test_new_order_is_created(self): order = models.Order(self.parsed_order) self.assertEqual(order.type, self.parsed_order['type']) self.assertEqual(order.meta, self.parsed_order['meta']) self.assertEqual(order.sub_status, self.parsed_order['sub_status']) self.assertEqual( order.sub_status_message, self.parsed_order['sub_status_message'] ) class WhenCreatingNewContainer(utils.BaseTestCase): def setUp(self): super(WhenCreatingNewContainer, self).setUp() self.parsed_container = {'name': 'name', 'type': 'generic', 'secret_refs': [ {'name': 'test secret 1', 'secret_ref': '123'}, {'name': 'test secret 2', 'secret_ref': '123'}, {'name': 'test secret 3', 'secret_ref': '123'} ]} def test_new_container_is_created_from_dict(self): container = models.Container(self.parsed_container) self.assertEqual(container.name, self.parsed_container['name']) self.assertEqual(container.type, self.parsed_container['type']) self.assertEqual(len(container.container_secrets), len(self.parsed_container['secret_refs'])) self.assertEqual(container.container_secrets[0].name, self.parsed_container['secret_refs'][0]['name']) self.assertEqual(container.container_secrets[0].secret_id, self.parsed_container['secret_refs'][0]['secret_ref']) self.assertEqual(container.container_secrets[1].name, self.parsed_container['secret_refs'][1]['name']) self.assertEqual(container.container_secrets[1].secret_id, self.parsed_container['secret_refs'][1]['secret_ref']) self.assertEqual(container.container_secrets[2].name, self.parsed_container['secret_refs'][2]['name']) self.assertEqual(container.container_secrets[2].secret_id, self.parsed_container['secret_refs'][2]['secret_ref']) def test_new_certificate_container_is_created_from_dict(self): self.parsed_container['type'] = 'certificate' container = models.Container(self.parsed_container) self.assertEqual(container.name, self.parsed_container['name']) self.assertEqual(container.type, self.parsed_container['type']) self.assertEqual(len(container.container_secrets), len(self.parsed_container['secret_refs'])) self.assertEqual(container.container_secrets[0].name, self.parsed_container['secret_refs'][0]['name']) self.assertEqual(container.container_secrets[0].secret_id, self.parsed_container['secret_refs'][0]['secret_ref']) self.assertEqual(container.container_secrets[1].name, self.parsed_container['secret_refs'][1]['name']) self.assertEqual(container.container_secrets[1].secret_id, self.parsed_container['secret_refs'][1]['secret_ref']) self.assertEqual(container.container_secrets[2].name, self.parsed_container['secret_refs'][2]['name']) self.assertEqual(container.container_secrets[2].secret_id, self.parsed_container['secret_refs'][2]['secret_ref']) def test_parse_secret_ref_uri(self): self.parsed_container['secret_refs'][0]['secret_ref'] = ( 'http://localhost:9110/123/secrets/123456') container = models.Container(self.parsed_container) self.assertEqual(container.container_secrets[0].secret_id, '123456') self.parsed_container['secret_refs'][0]['secret_ref'] = ( 'http://localhost:9110/123/secrets/123456/') container = models.Container(self.parsed_container) self.assertEqual(container.container_secrets[0].secret_id, '123456') class WhenCreatingNewConsumer(utils.BaseTestCase): def setUp(self): super(WhenCreatingNewConsumer, self).setUp() self.parsed_consumer = {'name': 'name', 'URL': 'URL'} self.container_id = '12345container' def test_new_consumer_is_created_from_dict(self): consumer = models.ContainerConsumerMetadatum(self.container_id, self.parsed_consumer) self.assertEqual(consumer.name, 
self.parsed_consumer['name']) self.assertEqual(consumer.URL, self.parsed_consumer['URL']) self.assertEqual(consumer.status, models.States.ACTIVE) def test_new_consumer_has_correct_hash(self): consumer_one = models.ContainerConsumerMetadatum(self.container_id, self.parsed_consumer) consumer_two = models.ContainerConsumerMetadatum(self.container_id, self.parsed_consumer) different_container = '67890container' consumer_three = models.ContainerConsumerMetadatum( different_container, self.parsed_consumer) self.assertEqual(consumer_one.data_hash, consumer_two.data_hash) self.assertNotEqual(consumer_one.data_hash, consumer_three.data_hash) class WhenProcessingJsonBlob(utils.BaseTestCase): def setUp(self): super(WhenProcessingJsonBlob, self).setUp() self.json_blob = models.JsonBlob() def test_process_bind_param_w_dict(self): res = self.json_blob.process_bind_param({'test': True}, None) self.assertEqual(res, '{"test": true}') def test_process_result_value_w_json_str(self): res = self.json_blob.process_result_value('{"test": true}', None) self.assertTrue(res.get('test')) class WhenCreatingOrderRetryTask(utils.BaseTestCase): def test_create_new_order_task(self): order = models.Order({ 'type': 'certificate', 'meta': { 'email': 'email@email.com' }, 'sub_status': 'Pending', 'sub_status_message': 'Waiting for instructions...' }) at = datetime.datetime.utcnow() order_retry_task = models.OrderRetryTask( order_id=order.id, retry_task="foobar", retry_at=at, retry_args=json.dumps(["one", "two"]), retry_kwargs=json.dumps({"three": "four"}), ) self.assertEqual(order_retry_task.order_id, order.id) self.assertEqual(order_retry_task.retry_task, "foobar") self.assertEqual(order_retry_task.retry_at, at) self.assertEqual( order_retry_task.retry_args, json.dumps(["one", "two"]), ) self.assertEqual( order_retry_task.retry_kwargs, json.dumps({"three": "four"}), ) def test_get_retry_params(self): order_retry_task = models.OrderRetryTask( retry_args=json.dumps(["one", "two"]), retry_kwargs=json.dumps({"three": "four"}), ) self.assertEqual( order_retry_task.get_retry_params(), (["one", "two"], {"three": "four"}), ) class WhenCreatingNewCertificateAuthority(utils.BaseTestCase): def setUp(self): super(WhenCreatingNewCertificateAuthority, self).setUp() expiration = (datetime.datetime.utcnow() + datetime.timedelta(minutes=10)) self.parsed_ca = {'plugin_name': 'dogtag_plugin', 'plugin_ca_id': 'ca_master', 'expiration': expiration.isoformat(), 'name': 'Dogtag CA', 'description': 'Master CA for Dogtag plugin', 'ca_signing_certificate': 'XXXXX', 'intermediates': 'YYYYY'} def test_new_ca_is_created_from_dict(self): ca = models.CertificateAuthority(self.parsed_ca) self.assertEqual(self.parsed_ca['plugin_name'], ca.plugin_name) self.assertEqual(self.parsed_ca['plugin_ca_id'], ca.plugin_ca_id) self.assertEqual(self.parsed_ca['name'], ca.ca_meta['name'].value) self.assertEqual(self.parsed_ca['description'], ca.ca_meta['description'].value) self.assertEqual(self.parsed_ca['ca_signing_certificate'], ca.ca_meta['ca_signing_certificate'].value) self.assertEqual(self.parsed_ca['intermediates'], ca.ca_meta['intermediates'].value) self.assertIsInstance(ca.expiration, datetime.datetime) self.assertEqual(ca.created_at, ca.updated_at) class WhenCreatingNewProjectCertificateAuthority(utils.BaseTestCase): def setUp(self): super(WhenCreatingNewProjectCertificateAuthority, self).setUp() expiration = (datetime.datetime.utcnow() + datetime.timedelta(minutes=10)) self.parsed_ca = {'plugin_name': 'dogtag_plugin', 'plugin_ca_id': 'ca_master', 
'expiration': expiration.isoformat(), 'name': 'Dogtag CA', 'description': 'Master CA for Dogtag plugin', 'ca_signing_certificate': 'XXXXX', 'intermediates': 'YYYYY'} def test_create_new_project_ca(self): ca = models.CertificateAuthority(self.parsed_ca) ca.id = '67890' project = models.Project() project.id = '12345' project_ca = models.ProjectCertificateAuthority(project.id, ca.id) self.assertEqual(ca.id, project_ca.ca_id) self.assertEqual(project.id, project_ca.project_id) class WhenCreatingNewPreferredCertificateAuthority(utils.BaseTestCase): def setUp(self): super(WhenCreatingNewPreferredCertificateAuthority, self).setUp() expiration = (datetime.datetime.utcnow() + datetime.timedelta(minutes=10)) self.parsed_ca = {'plugin_name': 'dogtag_plugin', 'plugin_ca_id': 'ca_master', 'expiration': expiration.isoformat(), 'name': 'Dogtag CA', 'description': 'Master CA for Dogtag plugin', 'ca_signing_certificate': 'XXXXX', 'intermediates': 'YYYYY'} def test_create_new_preferred_ca(self): ca = models.CertificateAuthority(self.parsed_ca) ca.id = '67890' project = models.Project() project.id = '12345' preferred_ca = models.PreferredCertificateAuthority(project.id, ca.id) self.assertEqual(ca.id, preferred_ca.ca_id) self.assertEqual(project.id, preferred_ca.project_id)
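# Illustrative sketch (not one of the original test cases). It shows, outside
# of a TestCase, how the dict-based constructors exercised above are used to
# build a Secret and a Container; all field values and the secret_ref URI are
# arbitrary examples.
def _example_build_secret_and_container():
    parsed_secret = {'name': 'example-secret',
                     'algorithm': 'aes',
                     'bit_length': 256,
                     'mode': 'cbc',
                     'plain_text': 'not-encrypted'}
    secret = models.Secret(parsed_secret)

    parsed_container = {'name': 'example-container',
                        'type': 'generic',
                        'secret_refs': [
                            # Only the last URI path segment is kept as the
                            # secret_id (trailing slashes are tolerated), as
                            # test_parse_secret_ref_uri demonstrates above.
                            {'name': 'example-secret',
                             'secret_ref': 'http://localhost:9110/123/secrets/abc123'}
                        ]}
    container = models.Container(parsed_container)
    return secret, container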
from pprint import pprint import pytest from etcdb.sqlparser.parser import SQLParser @pytest.fixture def parser(): return SQLParser() def test_select_version(parser): tree = parser.parse('SELECT VERSION()') assert tree.query_type == "SELECT" assert tree.expressions == [ { 'type': 'function', 'name': 'VERSION' } ] def test_select_version_i(parser): tree = parser.parse('select version()') assert tree.query_type == "SELECT" assert tree.expressions == [ { 'type': 'function', 'name': 'VERSION' } ] @pytest.mark.parametrize('query,table,fields', [ ( """CREATE TABLE `django_migrations` ( `id` INTEGER AUTO_INCREMENT NOT NULL PRIMARY KEY, `app` VARCHAR(255) NOT NULL, `name` VARCHAR(255) NOT NULL, `applied` DATETIME NOT NULL)""", 'django_migrations', { 'id': { 'type': 'INTEGER', 'options': { 'auto_increment': True, 'nullable': False, 'primary': True } }, 'app': { 'type': 'VARCHAR', 'options': { 'nullable': False } }, 'name': { 'type': 'VARCHAR', 'options': { 'nullable': False } }, 'applied': { 'type': 'DATETIME', 'options': { 'nullable': False } } } ), ( 'CREATE TABLE `auth_group_permissions` (' '`id` INTEGER AUTO_INCREMENT NOT NULL PRIMARY KEY, ' '`group_id` INTEGER NOT NULL, ' '`permission_id` INTEGER NOT NULL)', 'auth_group_permissions', { 'id': { 'type': 'INTEGER', 'options': { 'auto_increment': True, 'nullable': False, 'primary': True } }, 'group_id': { 'type': 'INTEGER', 'options': { 'nullable': False } }, 'permission_id': { 'type': 'INTEGER', 'options': { 'nullable': False } } } ), ( 'CREATE TABLE `t1` (' '`id` INT)', 't1', { 'id': { 'type': 'INT', 'options': { 'nullable': True } } } ), ( 'CREATE TABLE t2 (' '`id` INT)', 't2', { 'id': { 'type': 'INT', 'options': { 'nullable': True } } } ), ( 'CREATE TABLE 1t (' '`id` INT)', '1t', { 'id': { 'type': 'INT', 'options': { 'nullable': True } } } ), ( 'CREATE TABLE 1t1 (' '`id` INT)', '1t1', { 'id': { 'type': 'INT', 'options': { 'nullable': True } } } ), ( """ CREATE TABLE `django_admin_log` ( `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `action_time` datetime NOT NULL, `object_id` longtext NULL, `object_repr` varchar(200) NOT NULL, `action_flag` smallint UNSIGNED NOT NULL, `change_message` longtext NOT NULL, `content_type_id` integer NULL, `user_id` integer NOT NULL) """, 'django_admin_log', { 'id': { 'type': 'INTEGER', 'options': { 'auto_increment': True, 'nullable': False, 'primary': True } }, 'action_time': { 'type': 'DATETIME', 'options': { 'nullable': False, } }, 'object_id': { 'type': 'LONGTEXT', 'options': { 'nullable': True, } }, 'object_repr': { 'type': 'VARCHAR', 'options': { 'nullable': False, } }, 'action_flag': { 'type': 'SMALLINT', 'options': { 'nullable': False } }, 'change_message': { 'type': 'LONGTEXT', 'options': { 'nullable': False } }, 'content_type_id': { 'type': 'INTEGER', 'options': { 'nullable': True } }, 'user_id': { 'type': 'INTEGER', 'options': { 'nullable': False } } } ), ( """ CREATE TABLE `auth_user` ( `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `password` varchar(128) NOT NULL, `last_login` datetime(6) NOT NULL, `is_superuser` bool NOT NULL, `username` varchar(30) NOT NULL UNIQUE, `first_name` varchar(30) NOT NULL, `last_name` varchar(30) NOT NULL, `email` varchar(75) NOT NULL, `is_staff` bool NOT NULL, `is_active` bool NOT NULL, `date_joined` datetime(6) NOT NULL) """, 'auth_user', { 'id': { 'type': 'INTEGER', 'options': { 'auto_increment': True, 'nullable': False, 'primary': True } }, 'password': { 'type': 'VARCHAR', 'options': { 'nullable': False, } }, 'last_login': { 'type': 'DATETIME', 'options': { 
'nullable': False, } }, 'is_superuser': { 'type': 'BOOL', 'options': { 'nullable': False, } }, 'username': { 'type': 'VARCHAR', 'options': { 'unique': True, 'nullable': False } }, 'first_name': { 'type': 'VARCHAR', 'options': { 'nullable': False, } }, 'last_name': { 'type': 'VARCHAR', 'options': { 'nullable': False, } }, 'email': { 'type': 'VARCHAR', 'options': { 'nullable': False, } }, 'is_staff': { 'type': 'BOOL', 'options': { 'nullable': False, } }, 'is_active': { 'type': 'BOOL', 'options': { 'nullable': False, } }, 'date_joined': { 'type': 'DATETIME', 'options': { 'nullable': False, } } } ), ( "CREATE TABLE `auth_group` (" "`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " "`name` varchar(80) NOT NULL UNIQUE)", 'auth_group', { 'id': { 'type': 'INTEGER', 'options': { 'auto_increment': True, 'nullable': False, 'primary': True } }, 'name': { 'type': 'VARCHAR', 'options': { 'unique': True, 'nullable': False } } } ) ]) def test_create_table(query, table, fields, parser): tree = parser.parse(query) assert tree.query_type == "CREATE_TABLE" assert tree.table == table pprint(tree.fields) assert tree.fields == fields def test_create_database(parser): query = "CREATE DATABASE `foo`" tree = parser.parse(query) assert tree.query_type == "CREATE_DATABASE" assert tree.db == 'foo' def test_show_databases(parser): query = "SHOW DATABASES" tree = parser.parse(query) assert tree.query_type == "SHOW_DATABASES" def test_show_tables(parser): query = "SHOW TABLES" tree = parser.parse(query) assert tree.query_type == "SHOW_TABLES" assert not tree.options['full'] assert tree.success def test_show_full_tables(parser): query = "SHOW FULL TABLES" tree = parser.parse(query) assert tree.query_type == "SHOW_TABLES" assert tree.options['full'] assert tree.success def test_use_database(parser): query = "USE `foo`" tree = parser.parse(query) assert tree.query_type == "USE_DATABASE" assert tree.db == 'foo' def test_create_table_int(parser): tree = parser.parse('CREATE TABLE t(id INT)') assert tree.success def test_select_fields_from(parser): query = """SELECT `django_migrations`.`app`, `django_migrations`.`name` FROM `django_migrations`""" tree = parser.parse(query) assert tree.success assert tree.query_type == "SELECT" assert tree.table == "django_migrations" print(tree.expressions) assert tree.expressions == [ { 'type': 'field', 'table_name': 'django_migrations', 'name': 'app' }, { 'type': 'field', 'table_name': 'django_migrations', 'name': 'name' } ] def test_select_short_fields_from(parser): query = "SELECT app, foo FROM `django_migrations`" tree = parser.parse(query) assert tree.success assert tree.query_type == "SELECT" assert tree.table == "django_migrations" print(tree.expressions) assert tree.expressions == [ { 'type': 'field', 'name': 'app' }, { 'type': 'field', 'name': 'foo' } ] def test_select_short_q_fields_from(parser): query = "SELECT `app`, `foo` FROM `django_migrations`" tree = parser.parse(query) assert tree.success assert tree.query_type == "SELECT" assert tree.table == "django_migrations" print(tree.expressions) assert tree.expressions == [ { 'type': 'field', 'name': 'app' }, { 'type': 'field', 'name': 'foo' } ] def test_select_two_func(parser): query = "SELECT VERSION(), VERSION()" tree = parser.parse(query) assert tree.success assert tree.query_type == "SELECT" print(tree.expressions) assert tree.expressions == [ { 'type': 'function', 'name': 'VERSION' }, { 'type': 'function', 'name': 'VERSION' } ] def test_select_var(parser): query = "SELECT @@sql_mode" tree = parser.parse(query) assert 
tree.success assert tree.query_type == "SELECT" assert tree.expressions == [ { 'type': 'variable', 'name': 'SQL_MODE' } ] def test_select_var_SQL_AUTO_IS_NULL(parser): query = "SELECT @@SQL_AUTO_IS_NULL" tree = parser.parse(query) assert tree.success assert tree.query_type == "SELECT" assert tree.expressions == [ { 'type': 'variable', 'name': 'SQL_AUTO_IS_NULL' } ] def test_commit(parser): query = "COMMIT" tree = parser.parse(query) assert tree.success assert tree.query_type == "COMMIT" def test_select_from_tbl(parser): query = """SELECT f1 FROM t1""" tree = parser.parse(query) assert tree.success assert tree.table == 't1' assert tree.db is None assert tree.expressions == [ { 'type': 'field', 'name': 'f1' } ] def test_select_from_db_tbl(parser): query = """SELECT f1, f2 FROM d1.t1""" tree = parser.parse(query) assert tree.success assert tree.table == 't1' assert tree.db == 'd1' assert tree.expressions == [ { 'type': 'field', 'name': 'f1' }, { 'type': 'field', 'name': 'f2' } ] @pytest.mark.parametrize('query,table,expressions,where', [ ( "SELECT f1, f2 FROM t1 WHERE f1 = 'foo'", 't1', [ { 'type': 'field', 'name': 'f1' }, { 'type': 'field', 'name': 'f2' } ], ('bool_primary', ('=', ('IDENTIFIER', 'f1'), ('STRING', 'foo'))) ), ( """ SELECT `id`, `app_label`, `model` FROM `django_content_type` WHERE `model` = 'logentry' AND `app_label` = 'admin' """, 'django_content_type', [ { 'type': 'field', 'name': 'id' }, { 'type': 'field', 'name': 'app_label' }, { 'type': 'field', 'name': 'model' } ], ( 'AND', ('bool_primary', ('=', ('IDENTIFIER', 'model'), ('STRING', 'logentry'))), ('bool_primary', ('=', ('IDENTIFIER', 'app_label'), ('STRING', 'admin'))) ) ), ( """ SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`model` = 'logentry' AND `django_content_type`.`app_label` = 'admin' """, 'django_content_type', [ { 'type': 'field', 'name': 'id', 'table_name': 'django_content_type' }, { 'type': 'field', 'name': 'app_label', 'table_name': 'django_content_type' }, { 'type': 'field', 'name': 'model', 'table_name': 'django_content_type' } ], ( 'AND', ('bool_primary', ('=', ('IDENTIFIER', 'django_content_type.model'), ('STRING', 'logentry'))), ('bool_primary', ('=', ('IDENTIFIER', 'django_content_type.app_label'), ('STRING', 'admin'))) ) ) ]) def test_select_from_tbl_where(parser, query, table, expressions, where): print(query) tree = parser.parse(query) assert tree.success assert tree.table == table assert tree.db is None assert tree.expressions == expressions print(query) print('Expected:') pprint(where) print('Got:') pprint(tree.where) assert tree.where == where @pytest.mark.parametrize('autocommit', [ 0, 1 ]) def test_set_autocommit(parser, autocommit): query = "set autocommit=%d" % autocommit tree = parser.parse(query) assert tree.success assert tree.query_type == "SET_AUTOCOMMIT" assert tree.options['autocommit'] == autocommit def test_select_cols_with_tbl(parser): query = "SELECT `django_migrations`.`app`, `django_migrations`.`name` FROM `django_migrations`" tree = parser.parse(query) assert tree.success assert tree.table == 'django_migrations' assert tree.db is None assert tree.expressions == [ { 'type': 'field', 'name': 'app', 'table_name': 'django_migrations' }, { 'type': 'field', 'name': 'name', 'table_name': 'django_migrations' } ] def test_set_names(parser): query = "SET NAMES utf8" tree = parser.parse(query) assert tree.success assert tree.query_type == "SET_NAMES" def test_insert(parser): query 
= "INSERT INTO `django_migrations` (`app`, `name`, `applied`) " \ "VALUES ('auth', '0003_alter_user_email_max_length', '2016-09-30 22:01:02.851495')" tree = parser.parse(query) assert tree.success assert tree.table == 'django_migrations' assert tree.query_type == "INSERT" assert tree.fields == { 'app': 'auth', 'name': '0003_alter_user_email_max_length', 'applied': '2016-09-30 22:01:02.851495' } def test_drop_database(parser): query = "DROP DATABASE foo" tree = parser.parse(query) assert tree.success assert tree.query_type == "DROP_DATABASE" assert tree.db == 'foo' def test_desc_table(parser): query = "desc foo" tree = parser.parse(query) assert tree.success assert tree.query_type == "DESC_TABLE" assert tree.table == 'foo' assert tree.db is None @pytest.mark.parametrize('query', [ """SELECT `django_session`.`session_key`, `django_session`.`session_data`, `django_session`.`expire_date` FROM `django_session` WHERE ( `django_session`.`session_key` = '5ly6avjqb20gmxav35soqnscb13iltd6' AND `django_session`.`expire_date` > '2016-10-10 18:23:18' ) """, """SELECT `django_session`.`session_key`, `django_session`.`session_data`, `django_session`.`expire_date` FROM `django_session` WHERE ( `django_session`.`session_key` = '5ly6avjqb20gmxav35soqnscb13iltd6' AND `django_session`.`expire_date` < '2016-10-10 18:23:18' ) """ ]) def test_select_with_more_in_where(query, parser): tree = parser.parse(query) assert tree.success assert tree.query_type == "SELECT" assert tree.table == 'django_session' assert tree.db is None pprint(tree.where) # assert 0 def test_select_with_limit(parser): query = "SELECT `auth_user`.`id`, `auth_user`.`password`, `auth_user`.`last_login`, `auth_user`.`is_superuser`, `auth_user`.`username`, `auth_user`.`first_name`, `auth_user`.`last_name`, `auth_user`.`email`, `auth_user`.`is_staff`, `auth_user`.`is_active`, `auth_user`.`date_joined` " \ "FROM `auth_user` WHERE `auth_user`.`username` = 'admin' LIMIT 21" tree = parser.parse(query) assert tree.success assert tree.query_type == "SELECT" assert tree.table == 'auth_user' assert tree.db is None assert tree.limit == 21 def test_insert_create_super_user(parser): query = """INSERT INTO `auth_user` (`password`, `last_login`, `is_superuser`, `username`, `first_name`, `last_name`, `email`, `is_staff`, `is_active`, `date_joined`) VALUES ('pbkdf2_sha256$30000$rpn3UE9RjKsE$MNBqf1DSWdT2qFFzZ/h++60/pga3xBFUk0QrokaBqtY=', 'None', 'True', 'admin', 'John', 'Smith', 'john.smith@aaa.com', 'True', 'True', '2016-10-10 19:03:31') """ tree = parser.parse(query) assert tree.success assert tree.query_type == "INSERT" assert tree.table == 'auth_user' assert tree.db is None def test_select_one_parenthesis(parser): query = """ SELECT (1) AS `a` FROM `django_session` WHERE `django_session`.`session_key` = 'mydfqwi234umfggb32s6hsc2vb4ewbpv' LIMIT 1 """ tree = parser.parse(query) assert tree.success assert tree.query_type == "SELECT" assert tree.table == 'django_session' assert tree.db is None def test_update(parser): query = "UPDATE `auth_user` SET `last_login` = '2016-10-10 19:19:56' WHERE `auth_user`.`id` = '2'" tree = parser.parse(query) assert tree.success assert tree.query_type == "UPDATE" assert tree.table == 'auth_user' assert tree.db is None def test_select_count_star(parser): query = "SELECT COUNT(*) AS `__count` FROM `foo_config`" tree = parser.parse(query) assert tree.success assert tree.query_type == "SELECT" assert tree.table == 'foo_config' assert tree.db is None assert tree.expressions == [ { 'type': 'function', 'name': 'COUNT' } ] 
@pytest.mark.parametrize('query,direction', [ ("SELECT foo FROM bar ORDER BY `foo_config`.`foo` ASC LIMIT 1", 'ASC'), ("SELECT foo FROM bar ORDER BY foo ASC", 'ASC'), ("SELECT foo FROM bar ORDER BY foo ASC LIMIT 1", 'ASC'), ("SELECT foo FROM bar ORDER BY foo DESC", 'DESC'), ("SELECT foo FROM bar ORDER BY foo", 'ASC') ]) def test_select_order(query, direction, parser): tree = parser.parse(query) assert tree.success assert tree.query_type == "SELECT" assert tree.table == 'bar' assert tree.db is None assert tree.expressions == [ { 'type': 'field', 'name': 'foo' } ] assert tree.order['by'] == 'foo' assert tree.order['direction'] == direction
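# Illustrative sketch (not one of the parametrized tests above). It drives the
# same SQLParser API directly, reusing token names already exercised in this
# module, and prints the parse-tree attributes the tests assert on.
def _example_parse_walkthrough():
    parser = SQLParser()
    tree = parser.parse(
        "SELECT `app` FROM `django_migrations` WHERE `app` = 'auth' LIMIT 5")
    assert tree.success
    assert tree.query_type == 'SELECT'
    assert tree.table == 'django_migrations'
    assert tree.limit == 5
    pprint(tree.expressions)
    pprint(tree.where)
    return tree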
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """SimpleStepSizeAdaptation TransitionKernel.""" import collections import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import assert_util from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.internal import prefer_static from tensorflow_probability.python.internal import tensorshape_util from tensorflow_probability.python.internal import unnest from tensorflow_probability.python.math.generic import reduce_logmeanexp from tensorflow_probability.python.mcmc import kernel as kernel_base from tensorflow_probability.python.mcmc.internal import util as mcmc_util def hmc_like_step_size_setter_fn(kernel_results, new_step_size): """Setter for `step_size` so it can be adapted.""" return unnest.replace_innermost(kernel_results, step_size=new_step_size) def hmc_like_step_size_getter_fn(kernel_results): """Getter for `step_size` so it can be inspected.""" return unnest.get_innermost(kernel_results, 'step_size') def hmc_like_log_accept_prob_getter_fn(kernel_results): log_accept_ratio = unnest.get_innermost(kernel_results, 'log_accept_ratio') safe_accept_ratio = tf.where( tf.math.is_finite(log_accept_ratio), log_accept_ratio, tf.constant(-np.inf, dtype=log_accept_ratio.dtype)) return tf.minimum(safe_accept_ratio, 0.) def get_differing_dims(a, b): # Get the indices of dimensions where shapes of `a` and `b` differ. # `a` is allowed to have fewer dimensions than `b`. if (not tensorshape_util.is_fully_defined(a.shape) or not tensorshape_util.is_fully_defined(b.shape)): return tf.where( tf.not_equal(tf.shape(a), tf.shape(b)[:tf.rank(a)]))[:, 0] a_shape = np.int32(a.shape) b_shape = np.int32(b.shape) return np.where(a_shape != b_shape[:len(a_shape)])[0] class SimpleStepSizeAdaptationResults( mcmc_util.PrettyNamedTupleMixin, collections.namedtuple( 'SimpleStepSizeAdaptationResults', [ 'inner_results', 'target_accept_prob', 'adaptation_rate', 'step', 'new_step_size', ])): """Results of the SimpleStepSizeAdaptation TransitionKernel. Attributes: inner_results: Results of the inner kernel. target_accept_prob: Floating point scalar `Tensor`. Target accept probability. adaptation_rate: Floating point scalar `Tensor`. Fraction by which to adjust the step size during each step. step: Int32 scalar `Tensor`. The current step number as perceived by this kernel. Increases by 1 for every call to `one_step`. new_step_size: Floating point scalar `Tensor` or a list thereof (one for each `state_part`). Step size that will be passed to the inner kernel during the next step. """ __slots__ = () class SimpleStepSizeAdaptation(kernel_base.TransitionKernel): """Adapts the inner kernel's `step_size` based on `log_accept_prob`. The simple policy multiplicatively increases or decreases the `step_size` of the inner kernel based on the value of `log_accept_prob`. 
It is based on [equation 19 of Andrieu and Thoms (2008)][1]. Given enough steps and small enough `adaptation_rate` the median of the distribution of the acceptance probability will converge to the `target_accept_prob`. A good target acceptance probability depends on the inner kernel. If this kernel is `HamiltonianMonteCarlo`, then 0.6-0.9 is a good range to aim for. For `RandomWalkMetropolis` this should be closer to 0.25. See the individual kernels' docstrings for guidance. In general, adaptation prevents the chain from reaching a stationary distribution, so obtaining consistent samples requires `num_adaptation_steps` be set to a value [somewhat smaller][2] than the number of burnin steps. However, it may sometimes be helpful to set `num_adaptation_steps` to a larger value during development in order to inspect the behavior of the chain during adaptation. The step size is assumed to broadcast with the chain state, potentially having leading dimensions corresponding to multiple chains. When there are fewer of those leading dimensions than there are chain dimensions, the corresponding dimensions in the `log_accept_prob` are averaged (in the direct space, rather than the log space) before being used to adjust the step size. This means that this kernel can do both cross-chain adaptation, or per-chain step size adaptation, depending on the shape of the step size. For example, if your problem has a state with shape `[S]`, your chain state has shape `[C0, C1, Y]` (meaning that there are `C0 * C1` total chains) and `log_accept_prob` has shape `[C0, C1]` (one acceptance probability per chain), then depending on the shape of the step size, the following will happen: - Step size has shape [], [S] or [1], the `log_accept_prob` will be averaged across its `C0` and `C1` dimensions. This means that you will learn a shared step size based on the mean acceptance probability across all chains. This can be useful if you don't have a lot of steps to adapt and want to average away the noise. - Step size has shape [C1, 1] or [C1, S], the `log_accept_prob` will be averaged across its `C0` dimension. This means that you will learn a shared step size based on the mean acceptance probability across chains that share the coordinate across the `C1` dimension. This can be useful when the `C1` dimension indexes different distributions, while `C0` indexes replicas of a single distribution, all sampled in parallel. - Step size has shape [C0, C1, 1] or [C0, C1, S], then no averaging will happen. This means that each chain will learn its own step size. This can be useful when all chains are sampling from different distributions. Even when all chains are for the same distribution, this can help during the initial warmup period. - Step size has shape [C0, 1, 1] or [C0, 1, S], the `log_accept_prob` will be averaged across its `C1` dimension. This means that you will learn a shared step size based on the mean acceptance probability across chains that share the coordinate across the `C0` dimension. This can be useful when the `C0` dimension indexes different distributions, while `C1` indexes replicas of a single distribution, all sampled in parallel. By default, the averaging function used above is the arithmetic mean, which is not robust to stuck chains (e.g. average of one chain with `p_accept = 0` and three chains with `p_accept = 1` will result in an average `p_accept = 0.75`, which will cause this kernel keep the step size roughly the same rather than reducing it to unstick the stuck chain). 
A more robust choice would be to set `reduce_fn` argument to `tfp.math.reduce_log_harmonic_mean_exp` [3]. Note, however, that the harmonic mean of a set of numbers is usually smaller than the arithmetic mean, so its use will typically produce smaller than optimal step sizes even for well behaved target distributions. #### Examples ```python import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions target_log_prob_fn = tfd.Normal(loc=0., scale=1.).log_prob num_burnin_steps = 500 num_results = 500 num_chains = 64 step_size = 0.1 # Or, if you want per-chain step size: # step_size = tf.fill([num_chains], step_size) kernel = tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=target_log_prob_fn, num_leapfrog_steps=2, step_size=step_size) kernel = tfp.mcmc.SimpleStepSizeAdaptation( inner_kernel=kernel, num_adaptation_steps=int(num_burnin_steps * 0.8)) # The chain will be stepped for num_results + num_burnin_steps, adapting for # the first num_adaptation_steps. samples, [step_size, log_accept_ratio] = tfp.mcmc.sample_chain( num_results=num_results, num_burnin_steps=num_burnin_steps, current_state=tf.zeros(num_chains), kernel=kernel, trace_fn=lambda _, pkr: [pkr.inner_results.accepted_results.step_size, pkr.inner_results.log_accept_ratio]) # ~0.75 p_accept = tf.math.exp(tfp.math.reduce_logmeanexp( tf.minimum(log_accept_ratio, 0.))) ``` #### References [1]: Andrieu, Christophe, Thoms, Johannes. A tutorial on adaptive MCMC. _Statistics and Computing_, 2008. https://people.eecs.berkeley.edu/~jordan/sail/readings/andrieu-thoms.pdf [2]: http://andrewgelman.com/2017/12/15/burn-vs-warm-iterative-simulation-algorithms/#comment-627745 [3]: Hoffman, M., Radul, A., & Sountsov, P. An Adaptive MCMC Scheme for Setting Trajectory Lengths in Hamiltonian Monte Carlo, 2020. In preparation. """ def __init__(self, inner_kernel, num_adaptation_steps, target_accept_prob=0.75, adaptation_rate=0.01, step_size_setter_fn=hmc_like_step_size_setter_fn, step_size_getter_fn=hmc_like_step_size_getter_fn, log_accept_prob_getter_fn=hmc_like_log_accept_prob_getter_fn, reduce_fn=reduce_logmeanexp, experimental_reduce_chain_axis_names=None, validate_args=False, name=None): """Creates the step size adaptation kernel. The default setter_fn and the getter_fn callbacks assume that the inner kernel produces kernel results structurally the same as the `HamiltonianMonteCarlo` kernel. Args: inner_kernel: `TransitionKernel`-like object. num_adaptation_steps: Scalar `int` `Tensor` number of initial steps to during which to adjust the step size. This may be greater, less than, or equal to the number of burnin steps. target_accept_prob: A floating point `Tensor` representing desired acceptance probability. Must be a positive number less than 1. This can either be a scalar, or have shape [num_chains]. Default value: `0.75` (the [center of asymptotically optimal rate for HMC][1]). adaptation_rate: `Tensor` representing amount to scale the current `step_size`. step_size_setter_fn: A callable with the signature `(kernel_results, new_step_size) -> new_kernel_results` where `kernel_results` are the results of the `inner_kernel`, `new_step_size` is a `Tensor` or a nested collection of `Tensor`s with the same structure as returned by the `step_size_getter_fn`, and `new_kernel_results` are a copy of `kernel_results` with the step size(s) set. 
step_size_getter_fn: A callable with the signature `(kernel_results) -> step_size` where `kernel_results` are the results of the `inner_kernel`, and `step_size` is a floating point `Tensor` or a nested collection of such `Tensor`s. log_accept_prob_getter_fn: A callable with the signature `(kernel_results) -> log_accept_prob` where `kernel_results` are the results of the `inner_kernel`, and `log_accept_prob` is a floating point `Tensor`. `log_accept_prob` can either be a scalar, or have shape [num_chains]. If it's the latter, `step_size` should also have the same leading dimension. reduce_fn: A callable with signature `(input_tensor, axis, keepdims) -> tensor` that returns a log-reduction of `log_accept_prob`, typically some sort of mean. By default, this performs an arithmetic mean. experimental_reduce_chain_axis_names: A `str` or list of `str`s indicating the named axes that should additionally reduced during the log-reduction of `log_accept_prob`. validate_args: Python `bool`. When `True` kernel parameters are checked for validity. When `False` invalid inputs may silently render incorrect outputs. name: Python `str` name prefixed to Ops created by this class. Default: 'simple_step_size_adaptation'. #### References [1]: Betancourt, M. J., Byrne, S., & Girolami, M. (2014). _Optimizing The Integrator Step Size for Hamiltonian Monte Carlo_. http://arxiv.org/abs/1411.6669 """ inner_kernel = mcmc_util.enable_store_parameters_in_results(inner_kernel) with tf.name_scope(mcmc_util.make_name( name, 'simple_step_size_adaptation', '__init__')) as name: dtype = dtype_util.common_dtype([target_accept_prob, adaptation_rate], tf.float32) target_accept_prob = tf.convert_to_tensor( target_accept_prob, dtype=dtype, name='target_accept_prob') adaptation_rate = tf.convert_to_tensor( adaptation_rate, dtype=dtype, name='adaptation_rate') num_adaptation_steps = tf.convert_to_tensor( num_adaptation_steps, dtype=tf.int32, name='num_adaptation_steps') target_accept_prob = _maybe_validate_target_accept_prob( target_accept_prob, validate_args) self._parameters = dict( inner_kernel=inner_kernel, num_adaptation_steps=num_adaptation_steps, target_accept_prob=target_accept_prob, adaptation_rate=adaptation_rate, step_size_setter_fn=step_size_setter_fn, step_size_getter_fn=step_size_getter_fn, log_accept_prob_getter_fn=log_accept_prob_getter_fn, reduce_fn=reduce_fn, experimental_reduce_chain_axis_names=( experimental_reduce_chain_axis_names), name=name, ) @property def inner_kernel(self): return self._parameters['inner_kernel'] @property def name(self): return self._parameters['name'] @property def num_adaptation_steps(self): return self._parameters['num_adaptation_steps'] def step_size_setter_fn(self, kernel_results, new_step_size): return self._parameters['step_size_setter_fn'](kernel_results, new_step_size) def step_size_getter_fn(self, kernel_results): return self._parameters['step_size_getter_fn'](kernel_results) def log_accept_prob_getter_fn(self, kernel_results): return self._parameters['log_accept_prob_getter_fn'](kernel_results) def reduce_fn(self, input_tensor, axis, keepdims, experimental_named_axis=None): if experimental_named_axis is None: return self._parameters['reduce_fn']( input_tensor, axis=axis, keepdims=keepdims) return self._parameters['reduce_fn']( input_tensor, axis=axis, keepdims=keepdims, experimental_named_axis=experimental_named_axis) @property def parameters(self): """Return `dict` of ``__init__`` arguments and their values.""" return self._parameters def one_step(self, current_state, 
previous_kernel_results, seed=None): with tf.name_scope(mcmc_util.make_name( self.name, 'simple_step_size_adaptation', 'one_step')): # Set the step_size. inner_results = self.step_size_setter_fn( previous_kernel_results.inner_results, previous_kernel_results.new_step_size) # Step the inner kernel. inner_kwargs = {} if seed is None else dict(seed=seed) new_state, new_inner_results = self.inner_kernel.one_step( current_state, inner_results, **inner_kwargs) # Get the new step size. log_accept_prob = self.log_accept_prob_getter_fn(new_inner_results) log_target_accept_prob = tf.math.log( tf.cast(previous_kernel_results.target_accept_prob, dtype=log_accept_prob.dtype)) state_parts = tf.nest.flatten(current_state) step_size = self.step_size_getter_fn(new_inner_results) step_size_parts = tf.nest.flatten(step_size) log_accept_prob_rank = prefer_static.rank(log_accept_prob) new_step_size_parts = [] for step_size_part, state_part in zip(step_size_parts, state_parts): # Compute new step sizes for each step size part. If step size part has # smaller rank than the corresponding state part, then the difference is # averaged away in the log accept prob. # # Example: # # state_part has shape [2, 3, 4, 5] # step_size_part has shape [1, 4, 1] # log_accept_prob has shape [2, 3, 4] # # Since step size has 1 rank fewer than the state, we reduce away the # leading dimension of log_accept_prob to get a Tensor with shape [3, # 4]. Next, since log_accept_prob must broadcast into step_size_part on # the left, we reduce the dimensions where their shapes differ, to get a # Tensor with shape [1, 4], which now is compatible with the leading # dimensions of step_size_part. # # There is a subtlety here in that step_size_parts might be a length-1 # list, which means that we'll be "structure-broadcasting" it for all # the state parts (see logic in, e.g., hmc.py). In this case we must # assume that that the lone step size provided broadcasts with the event # dims of each state part. This means that either step size has no # dimensions corresponding to chain dimensions, or all states are of the # same shape. For the former, we want to reduce over all chain # dimensions. For the later, we want to use the same logic as in the # non-structure-broadcasted case. # # It turns out we can compute the reduction dimensions for both cases # uniformly by taking the rank of any state part. This obviously works # in the second case (where all state ranks are the same). In the first # case, all state parts have the rank L + D_i + B, where L is the rank # of log_accept_prob, D_i is the non-shared dimensions amongst all # states, and B are the shared dimensions of all the states, which are # equal to the step size. When we subtract B, we will always get a # number >= L, which means we'll get the full reduction we want. num_reduce_dims = prefer_static.minimum( log_accept_prob_rank, prefer_static.rank(state_part) - prefer_static.rank(step_size_part)) reduced_log_accept_prob = self.reduce_fn( log_accept_prob, axis=prefer_static.range(num_reduce_dims), keepdims=False, experimental_named_axis=self.experimental_reduce_chain_axis_names) # reduced_log_accept_prob must broadcast into step_size_part on the # left, so we do an additional reduction over dimensions where their # shapes differ. reduce_indices = get_differing_dims(reduced_log_accept_prob, step_size_part) reduced_log_accept_prob = self.reduce_fn( reduced_log_accept_prob, axis=reduce_indices, keepdims=True) one_plus_adaptation_rate = 1. 
+ tf.cast( previous_kernel_results.adaptation_rate, dtype=step_size_part.dtype) new_step_size_part = mcmc_util.choose( reduced_log_accept_prob > log_target_accept_prob, step_size_part * one_plus_adaptation_rate, step_size_part / one_plus_adaptation_rate) new_step_size_parts.append( tf.where(previous_kernel_results.step < self.num_adaptation_steps, new_step_size_part, step_size_part)) new_step_size = tf.nest.pack_sequence_as(step_size, new_step_size_parts) return new_state, previous_kernel_results._replace( inner_results=new_inner_results, step=1 + previous_kernel_results.step, new_step_size=new_step_size) def bootstrap_results(self, init_state): with tf.name_scope(mcmc_util.make_name( self.name, 'simple_step_size_adaptation', 'bootstrap_results')): inner_results = self.inner_kernel.bootstrap_results(init_state) step_size = self.step_size_getter_fn(inner_results) return SimpleStepSizeAdaptationResults( inner_results=inner_results, step=tf.constant(0, dtype=tf.int32), target_accept_prob=self.parameters['target_accept_prob'], adaptation_rate=self.parameters['adaptation_rate'], new_step_size=step_size) @property def is_calibrated(self): return self.inner_kernel.is_calibrated @property def experimental_shard_axis_names(self): return self.inner_kernel.experimental_shard_axis_names def experimental_with_shard_axes(self, shard_axis_names): return self.copy( inner_kernel=self.inner_kernel.experimental_with_shard_axes( shard_axis_names)) @property def experimental_reduce_chain_axis_names(self): return self._parameters['experimental_reduce_chain_axis_names'] def _maybe_validate_target_accept_prob(target_accept_prob, validate_args): """Validates that target_accept_prob is in (0, 1).""" if not validate_args: return target_accept_prob with tf.control_dependencies([ assert_util.assert_positive( target_accept_prob, message='`target_accept_prob` must be > 0.'), assert_util.assert_less( target_accept_prob, tf.ones_like(target_accept_prob), message='`target_accept_prob` must be < 1.') ]): return tf.identity(target_accept_prob)
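# Illustrative sketch (not part of the library module above). It wires
# SimpleStepSizeAdaptation around HMC with one step size per chain and the
# more robust log-harmonic-mean reduction mentioned in the class docstring;
# the target distribution and all numeric settings are arbitrary examples.
def _example_per_chain_step_size_adaptation():
    import tensorflow as tf
    import tensorflow_probability as tfp

    num_chains = 16
    hmc = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=tfp.distributions.Normal(loc=0., scale=1.).log_prob,
        num_leapfrog_steps=3,
        # Shape [num_chains] step size => each chain adapts independently.
        step_size=tf.fill([num_chains], 0.1))
    adaptive_hmc = tfp.mcmc.SimpleStepSizeAdaptation(
        inner_kernel=hmc,
        num_adaptation_steps=400,
        # Less sensitive to stuck chains than the default arithmetic mean.
        reduce_fn=tfp.math.reduce_log_harmonic_mean_exp)
    samples = tfp.mcmc.sample_chain(
        num_results=500,
        num_burnin_steps=500,
        current_state=tf.zeros(num_chains),
        kernel=adaptive_hmc,
        trace_fn=None)
    return samples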
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for partitioned_variables.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf def _IotaInitializer(shape, dtype=tf.float32): assert dtype == tf.float32 if len(shape) == 1: return range(shape[0]) else: val = _IotaInitializer(shape[1:], dtype) return [[(10 ** i) * v for v in val] for i in range(shape[0])] class PartitionedVariablesTestCase(tf.test.TestCase): def _TestSaveSpec(self, slices, expected_specs): self.assertEqual(len(expected_specs), len(slices)) for i in xrange(len(expected_specs)): self.assertEquals(expected_specs[i], slices[i]._save_slice_info.spec) def testVecConstantInit(self): with self.test_session(): rnd_par = tf.constant([1, 2, 3, 4]) vs = tf.create_partitioned_variables([4], [4], rnd_par) tf.initialize_all_variables().run() val = tf.concat(0, vs).eval() rnd = rnd_par.eval() self.assertAllClose(rnd, val) self.assertEqual([tf.int32] * 4, [v.dtype.base_dtype for v in vs]) self._TestSaveSpec(vs, ["4 0,1", "4 1,1", "4 2,1", "4 3,1"]) def testConstantInit(self): with self.test_session(): rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]) vs = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par) tf.initialize_all_variables().run() val = tf.concat(1, vs).eval() rnd = rnd_par.eval() self.assertAllClose(rnd, val) self.assertEqual([tf.int32] * 2, [v.dtype.base_dtype for v in vs]) self._TestSaveSpec(vs, ["2 4 0,2:0,2", "2 4 0,2:2,2"]) def testName(self): with self.test_session(): rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]) with tf.variable_scope("hi"): vs1 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par) vs2 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par) tf.initialize_all_variables().run() var1_name = vs1[0]._save_slice_info.full_name var2_name = vs2[0]._save_slice_info.full_name self.assertEqual("hi/PartitionedVariable", var1_name) self.assertEqual("hi/PartitionedVariable_1", var2_name) self.assertEqual(var1_name + "/part_0:0", vs1[0].name) self.assertEqual(var1_name + "/part_1:0", vs1[1].name) self.assertEqual(var2_name + "/part_0:0", vs2[0].name) self.assertEqual(var2_name + "/part_1:0", vs2[1].name) # Test same variable. 
with self.test_session(): rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]) with tf.variable_scope("hola") as vs: vs1 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par) with tf.variable_scope(vs, reuse=True): vs2 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par) tf.initialize_all_variables().run() var1_name = vs1[0]._save_slice_info.full_name var2_name = vs2[0]._save_slice_info.full_name self.assertEqual("hola/PartitionedVariable", var1_name) self.assertEqual("hola/PartitionedVariable", var2_name) self.assertEqual(var1_name + "/part_0:0", vs1[0].name) self.assertEqual(var1_name + "/part_1:0", vs1[1].name) self.assertEqual(var2_name + "/part_0:0", vs2[0].name) self.assertEqual(var2_name + "/part_1:0", vs2[1].name) # Test name_scope with self.test_session(): rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]) with tf.name_scope("ola"): vs1 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par) vs2 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par) tf.initialize_all_variables().run() var1_name = vs1[0]._save_slice_info.full_name var2_name = vs2[0]._save_slice_info.full_name # Currently, the name scope 'ola' has no effect. self.assertEqual("PartitionedVariable", var1_name) self.assertEqual("PartitionedVariable_1", var2_name) self.assertEqual(var1_name + "/part_0:0", vs1[0].name) self.assertEqual(var1_name + "/part_1:0", vs1[1].name) self.assertEqual(var2_name + "/part_0:0", vs2[0].name) self.assertEqual(var2_name + "/part_1:0", vs2[1].name) def testRandomInitValue(self): with self.test_session(): rnd = tf.Variable(tf.random_uniform([200, 40])) vs = tf.create_partitioned_variables( rnd.get_shape(), [1, 10], rnd.initialized_value()) tf.initialize_all_variables().run() val = tf.concat(1, vs).eval() rnd = rnd.eval() self.assertAllClose(rnd, val) self.assertEqual([tf.float32] * 10, [v.dtype.base_dtype for v in vs]) self._TestSaveSpec(vs, ["200 40 0,200:0,4", "200 40 0,200:4,4", "200 40 0,200:8,4", "200 40 0,200:12,4", "200 40 0,200:16,4", "200 40 0,200:20,4", "200 40 0,200:24,4", "200 40 0,200:28,4", "200 40 0,200:32,4", "200 40 0,200:36,4"]) def testRandomInitUnevenPartitions(self): with self.test_session(): rnd = tf.Variable( tf.random_uniform([20, 43], dtype=tf.float64)) var_lists = [ tf.create_partitioned_variables( rnd.get_shape(), [1, i], rnd.initialized_value()) for i in xrange(1, 10)] tf.initialize_all_variables().run() rnd_val = rnd.eval() # Only check the slice save specs for the first 5 tf. 
save_specs = [ # One slice ["20 43 0,20:0,43"], # Two slices ["20 43 0,20:0,22", "20 43 0,20:22,21"], # Three slices ["20 43 0,20:0,15", "20 43 0,20:15,14", "20 43 0,20:29,14"], # Four slices ["20 43 0,20:0,11", "20 43 0,20:11,11", "20 43 0,20:22,11", "20 43 0,20:33,10"], # Five slices ["20 43 0,20:0,9", "20 43 0,20:9,9", "20 43 0,20:18,9", "20 43 0,20:27,8", "20 43 0,20:35,8"]] for i, vs in enumerate(var_lists): var_val = tf.concat(1, vs).eval() self.assertAllClose(rnd_val, var_val) self.assertEqual( [tf.float64] * len(vs), [v.dtype.base_dtype for v in vs]) if i < len(save_specs): self._TestSaveSpec(vs, save_specs[i]) def testDegenerate(self): with self.test_session(): rnd = tf.Variable(tf.random_uniform([10, 43])) vs = tf.create_partitioned_variables( rnd.get_shape(), [1, 1], rnd.initialized_value()) tf.initialize_all_variables().run() val = tf.concat(0, vs).eval() rnd = rnd.eval() self.assertAllClose(rnd, val) self._TestSaveSpec(vs, ["10 43 0,10:0,43"]) def testSliceSizeOne(self): with self.test_session(): rnd = tf.Variable(tf.random_uniform([10, 43])) vs = tf.create_partitioned_variables( rnd.get_shape(), [10, 1], rnd.initialized_value()) tf.initialize_all_variables().run() val = tf.concat(0, vs).eval() rnd = rnd.eval() self.assertAllClose(rnd, val) self._TestSaveSpec(vs, ["10 43 0,1:0,43", "10 43 1,1:0,43", "10 43 2,1:0,43", "10 43 3,1:0,43", "10 43 4,1:0,43", "10 43 5,1:0,43", "10 43 6,1:0,43", "10 43 7,1:0,43", "10 43 8,1:0,43", "10 43 9,1:0,43"]) def testIotaInitializer(self): self.assertAllClose([0., 1., 2., 3.], _IotaInitializer([4])) self.assertAllClose([[0., 1.], [0., 10.], [0., 100.], [0., 1000.]], _IotaInitializer([4, 2])) with self.test_session(): vs = tf.create_partitioned_variables([13, 5], [3, 1], _IotaInitializer) tf.initialize_all_variables().run() slice0 = _IotaInitializer([5, 5]) slice1 = _IotaInitializer([4, 5]) slice2 = _IotaInitializer([4, 5]) val = tf.concat(0, vs).eval() self.assertAllClose(slice0 + slice1 + slice2, val) self._TestSaveSpec(vs, ["13 5 0,5:0,5", "13 5 5,4:0,5", "13 5 9,4:0,5"]) def testRandomInitializer(self): # Sanity check that the slices uses a different seed when using a random # initializer function. with self.test_session(): var0, var1 = tf.create_partitioned_variables( [20, 12], [1, 2], tf.random_uniform_initializer()) tf.initialize_all_variables().run() val0, val1 = var0.eval().flatten(), var1.eval().flatten() self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6) # Negative test that proves that slices have the same values if # the random initializer uses a seed. 
with self.test_session(): var0, var1 = tf.create_partitioned_variables( [20, 12], [1, 2], tf.random_uniform_initializer(seed=201)) tf.initialize_all_variables().run() val0, val1 = var0.eval().flatten(), var1.eval().flatten() self.assertAllClose(val0, val1) def testSomeErrors(self): with self.test_session(): rnd = tf.Variable(tf.random_uniform([10, 43])) with self.assertRaises(ValueError): tf.create_partitioned_variables([10], [1, 1], rnd.initialized_value()) with self.assertRaises(ValueError): tf.create_partitioned_variables([10, 20], [1], rnd.initialized_value()) with self.assertRaises(ValueError): tf.create_partitioned_variables([10, 43], [1], rnd.initialized_value()) with self.assertRaises(ValueError): tf.create_partitioned_variables( [10, 43], [1, 2, 3], rnd.initialized_value()) with self.assertRaises(ValueError): tf.create_partitioned_variables( [10, 43], [11, 1], rnd.initialized_value()) with self.assertRaises(ValueError): tf.create_partitioned_variables( [10, 43], [20, 1], rnd.initialized_value()) with self.assertRaises(ValueError): tf.create_partitioned_variables( [10, 43], [1, 50], rnd.initialized_value()) if __name__ == "__main__": tf.test.main()
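# Illustrative sketch (not one of the test cases above). It mirrors the same
# TF 0.x-era calls the tests use -- create_partitioned_variables(),
# initialize_all_variables() and the old concat(concat_dim, values) signature
# -- to build a variable split into four column slices; shapes are arbitrary.
def _example_partitioned_variable():
    with tf.Session():
        init = tf.random_uniform_initializer()
        # Split an [8, 20] variable into 4 slices along its second dimension.
        parts = tf.create_partitioned_variables([8, 20], [1, 4], init)
        tf.initialize_all_variables().run()
        # Reassemble the slices to recover the full [8, 20] value.
        return tf.concat(1, parts).eval()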
import os import shutil import tempfile import functools from getpass import getuser from mock import patch from StringIO import StringIO from mock import Mock, patch from zope.interface import implements from twisted.trial import unittest from twisted.test import proto_helpers from twisted.internet import defer, error, task, tcp from twisted.internet.endpoints import TCP4ServerEndpoint, serverFromString from twisted.python.failure import Failure from twisted.internet.interfaces import IReactorCore from twisted.internet.interfaces import IProtocolFactory from twisted.internet.interfaces import IProtocol from twisted.internet.interfaces import IReactorTCP from twisted.internet.interfaces import IListeningPort from twisted.internet.interfaces import IAddress from txtorcon import TorControlProtocol from txtorcon import ITorControlProtocol from txtorcon import TorConfig from txtorcon import DEFAULT_VALUE from txtorcon import HiddenService from txtorcon import launch_tor from txtorcon import TCPHiddenServiceEndpoint from txtorcon import TorNotFound from txtorcon import TCPHiddenServiceEndpointParser from txtorcon import IProgressProvider from txtorcon import torconfig from txtorcon.torconfig import TorProcessProtocol from txtorcon.util import delete_file_or_tree from txtorcon.torconfig import parse_client_keys class FakeControlProtocol: """ This is a little weird, but in most tests the answer at the top of the list is sent back immediately in an already-called Deferred. However, if the answer list is empty at the time of the call, instead the returned Deferred is added to the pending list and answer_pending() may be called to have the next Deferred fire. (see test_slutty_postbootstrap for an example). It is done this way in case we need to have some other code run between the get_conf (or whatever) and the callback -- if the Deferred is already-fired when get_conf runs, there's a Very Good Chance (always?) that the callback just runs right away. """ implements(ITorControlProtocol) # actually, just get_info_raw def __init__(self, answers): self.answers = answers self.pending = [] self.post_bootstrap = defer.succeed(self) self.on_disconnect = defer.Deferred() self.sets = [] self.events = {} #: event type -> callback self.pending_events = {} #: event type -> list self.is_owned = -1 def event_happened(self, event_type, *args): ''' Use this in your tests to send 650 events when an event-listener is added. XXX Also if we've *already* added one? Do that if there's a use-case for it ''' if event_type in self.pending_events: self.pending_events[event_type].append(args) else: self.pending_events[event_type] = [args] def answer_pending(self, answer): d = self.pending[0] self.pending = self.pending[1:] d.callback(answer) def get_info_raw(self, info): if len(self.answers) == 0: d = defer.Deferred() self.pending.append(d) return d d = defer.succeed(self.answers[0]) self.answers = self.answers[1:] return d @defer.inlineCallbacks def get_info_incremental(self, info, cb): text = yield self.get_info_raw(info) for line in text.split('\r\n'): cb(line) defer.returnValue('') # FIXME uh....what's up at torstate.py:350? 
def get_conf(self, info): if len(self.answers) == 0: d = defer.Deferred() self.pending.append(d) return d d = defer.succeed(self.answers[0]) self.answers = self.answers[1:] return d get_conf_raw = get_conf # up to test author ensure the answer is a raw string def set_conf(self, *args): for i in range(0, len(args), 2): self.sets.append((args[i], args[i + 1])) return defer.succeed('') def add_event_listener(self, nm, cb): self.events[nm] = cb if nm in self.pending_events: for event in self.pending_events[nm]: cb(*event) def remove_event_listener(self, nm, cb): del self.events[nm] class CheckAnswer: def __init__(self, test, ans): self.answer = ans self.test = test def __call__(self, x): self.test.assertEqual(x, self.answer) class ConfigTests(unittest.TestCase): """ FIXME hmm, this all seems a little convoluted to test errors? Maybe not that bad. """ def setUp(self): self.protocol = FakeControlProtocol([]) def test_boolean_parse_error(self): self.protocol.answers.append('config/names=\nfoo Boolean') self.protocol.answers.append({'foo': 'bar'}) cfg = TorConfig(self.protocol) return self.assertFailure(cfg.post_bootstrap, ValueError) def test_contains(self): cfg = TorConfig() cfg.ControlPort = 4455 self.assertTrue('ControlPort' in cfg) def test_boolean_parser(self): self.protocol.answers.append('config/names=\nfoo Boolean\nbar Boolean') self.protocol.answers.append({'foo': '0'}) self.protocol.answers.append({'bar': '1'}) # FIXME does a Tor controller only ever send "0" and "1" for # true/false? Or do we need to accept others? conf = TorConfig(self.protocol) self.assertTrue(conf.foo is False) self.assertTrue(conf.bar is True) def test_boolean_auto_parser(self): self.protocol.answers.append( 'config/names=\nfoo Boolean+Auto\nbar Boolean+Auto\nbaz Boolean+Auto' ) self.protocol.answers.append({'foo': '0'}) self.protocol.answers.append({'bar': '1'}) self.protocol.answers.append({'baz': 'auto'}) conf = TorConfig(self.protocol) self.assertTrue(conf.foo is 0) self.assertTrue(conf.bar is 1) self.assertTrue(conf.baz is -1) def test_string_parser(self): self.protocol.answers.append('config/names=\nfoo String') self.protocol.answers.append({'foo': 'bar'}) conf = TorConfig(self.protocol) self.assertEqual(conf.foo, 'bar') def test_int_parser(self): self.protocol.answers.append('config/names=\nfoo Integer') self.protocol.answers.append({'foo': '123'}) conf = TorConfig(self.protocol) self.assertEqual(conf.foo, 123) def test_int_parser_error(self): self.protocol.answers.append('config/names=\nfoo Integer') self.protocol.answers.append({'foo': '123foo'}) cfg = TorConfig(self.protocol) self.assertFailure(cfg.post_bootstrap, ValueError) def test_int_parser_error_2(self): self.protocol.answers.append('config/names=\nfoo Integer') self.protocol.answers.append({'foo': '1.23'}) cfg = TorConfig(self.protocol) return self.assertFailure(cfg.post_bootstrap, ValueError) def test_linelist_parser(self): self.protocol.answers.append('config/names=\nfoo LineList') self.protocol.answers.append({'foo': 'bar\nbaz'}) conf = TorConfig(self.protocol) self.assertEqual(conf.foo, ['bar', 'baz']) def test_listlist_parser_with_list(self): self.protocol.answers.append('config/names=\nfoo LineList') self.protocol.answers.append({'foo': [1, 2, 3]}) conf = TorConfig(self.protocol) self.assertEqual(conf.foo, ['1', '2', '3']) def test_float_parser(self): self.protocol.answers.append('config/names=\nfoo Float') self.protocol.answers.append({'foo': '1.23'}) conf = TorConfig(self.protocol) self.assertEqual(conf.foo, 1.23) def 
test_float_parser_error(self): self.protocol.answers.append('config/names=\nfoo Float') self.protocol.answers.append({'foo': '1.23fff'}) cfg = TorConfig(self.protocol) return self.assertFailure(cfg.post_bootstrap, ValueError) def test_list(self): self.protocol.answers.append('config/names=\nbing CommaList') self.protocol.answers.append({'bing': 'foo,bar,baz'}) conf = TorConfig(self.protocol) self.assertEqual(conf.config['bing'], ['foo', 'bar', 'baz']) # self.assertEqual(conf.bing, ['foo','bar','baz']) def test_single_list(self): self.protocol.answers.append('config/names=\nbing CommaList') self.protocol.answers.append({'bing': 'foo'}) conf = TorConfig(self.protocol) self.assertTrue(conf.post_bootstrap.called) self.assertEqual(conf.config['bing'], ['foo']) def test_multi_list_space(self): self.protocol.answers.append('config/names=\nbing CommaList') self.protocol.answers.append({'bing': 'foo, bar , baz'}) conf = TorConfig(self.protocol) self.assertEqual(conf.bing, ['foo', 'bar', 'baz']) def test_descriptor_access(self): self.protocol.answers.append('config/names=\nbing CommaList') self.protocol.answers.append({'bing': 'foo,bar'}) conf = TorConfig(self.protocol) self.assertEqual(conf.config['bing'], ['foo', 'bar']) self.assertEqual(conf.bing, ['foo', 'bar']) self.protocol.answers.append('250 OK') conf.bing = ['a', 'b'] self.assertEqual(conf.bing, ['foo', 'bar']) d = conf.save() def confirm(conf): self.assertEqual(conf.config['bing'], ['a', 'b']) self.assertEqual(conf.bing, ['a', 'b']) d.addCallbacks(confirm, self.fail) return d def test_unknown_descriptor(self): self.protocol.answers.append('config/names=\nbing CommaList') self.protocol.answers.append({'bing': 'foo'}) conf = TorConfig(self.protocol) try: conf.foo self.assertTrue(False) except KeyError, e: self.assertTrue('foo' in str(e)) def test_invalid_parser(self): self.protocol.answers.append( 'config/names=\nSomethingExciting NonExistantParserType' ) cfg = TorConfig(self.protocol) return self.assertFailure(cfg.post_bootstrap, RuntimeError) def test_iteration(self): conf = TorConfig() conf.SOCKSPort = 9876 conf.save() x = list(conf) self.assertEqual(x, ['SOCKSPort']) conf.save() def test_get_type(self): self.protocol.answers.append( 'config/names=\nSomethingExciting CommaList\nHiddenServices Dependant' ) self.protocol.answers.append({'SomethingExciting': 'a,b'}) conf = TorConfig(self.protocol) from txtorcon.torconfig import CommaList, HiddenService self.assertEqual(conf.get_type('SomethingExciting'), CommaList) self.assertEqual(conf.get_type('HiddenServices'), HiddenService) def test_immediate_hiddenservice_append(self): '''issue #88. 
we check that a .append(hs) works on a blank TorConfig''' conf = TorConfig() hs = HiddenService(conf, '/dev/null', ['80 127.0.0.1:1234']) conf.HiddenServices.append(hs) self.assertEqual(len(conf.HiddenServices), 1) self.assertEqual(conf.HiddenServices[0], hs) def foo(self, *args): print "FOOO", args def test_slutty_postbootstrap(self): # test that doPostbootstrap still works in "slutty" mode self.protocol.answers.append('config/names=\nORPort Port') # we can't answer right away, or we do all the _do_setup # callbacks before _setup_ is set -- but we need to do an # answer callback after that to trigger this bug conf = TorConfig(self.protocol) self.assertTrue('_setup_' in conf.__dict__) self.protocol.answer_pending({'ORPort': 1}) def test_immediate_bootstrap(self): self.protocol.post_bootstrap = None self.protocol.answers.append('config/names=\nfoo Boolean') self.protocol.answers.append({'foo': '0'}) conf = TorConfig(self.protocol) self.assertTrue('foo' in conf.config) def test_multiple_orports(self): self.protocol.post_bootstrap = None self.protocol.answers.append('config/names=\nOrPort CommaList') self.protocol.answers.append({'OrPort': '1234'}) conf = TorConfig(self.protocol) conf.OrPort = ['1234', '4321'] conf.save() self.assertEqual(self.protocol.sets, [('OrPort', '1234'), ('OrPort', '4321')]) def test_set_multiple(self): self.protocol.answers.append('config/names=\nAwesomeKey String') self.protocol.answers.append({'AwesomeKey': 'foo'}) conf = TorConfig(self.protocol) conf.awesomekey conf.awesomekey = 'baz' self.assertTrue(conf.needs_save()) conf.awesomekey = 'nybble' conf.awesomekey = 'pac man' conf.save() self.assertEqual(len(self.protocol.sets), 1) self.assertEqual(self.protocol.sets[0], ('AwesomeKey', 'pac man')) def test_log_double_save(self): self.protocol.answers.append( 'config/names=\nLog LineList\nFoo String''' ) self.protocol.answers.append( {'Log': 'notice file /var/log/tor/notices.log'} ) self.protocol.answers.append({'Foo': 'foo'}) conf = TorConfig(self.protocol) conf.log.append('info file /tmp/foo.log') conf.foo = 'bar' self.assertTrue(conf.needs_save()) conf.save() conf.save() # just for the code coverage... 
self.assertTrue(not conf.needs_save()) self.protocol.sets = [] conf.save() self.assertEqual(self.protocol.sets, []) def test_set_save_modify(self): self.protocol.answers.append('config/names=\nLog LineList') self.protocol.answers.append( {'Log': 'notice file /var/log/tor/notices.log'} ) conf = TorConfig(self.protocol) conf.log = [] self.assertTrue(conf.needs_save()) conf.save() conf.log.append('notice file /tmp/foo.log') self.assertTrue(conf.needs_save()) def test_proper_sets(self): self.protocol.answers.append('config/names=\nLog LineList') self.protocol.answers.append({'Log': 'foo'}) conf = TorConfig(self.protocol) conf.log.append('bar') conf.save() self.assertEqual(len(self.protocol.sets), 2) self.assertEqual(self.protocol.sets[0], ('Log', 'foo')) self.assertEqual(self.protocol.sets[1], ('Log', 'bar')) @defer.inlineCallbacks def test_attach_protocol(self): self.protocol.answers.append('config/names=\nLog LineList') self.protocol.answers.append({'Log': 'foo'}) conf = TorConfig() d = conf.attach_protocol(self.protocol) yield d conf.log.append('bar') yield conf.save() self.assertEqual(len(self.protocol.sets), 2) self.assertEqual(self.protocol.sets[0], ('Log', 'foo')) self.assertEqual(self.protocol.sets[1], ('Log', 'bar')) def test_attach_protocol_but_already_have_one(self): conf = TorConfig(self.protocol) self.assertRaises(RuntimeError, conf.attach_protocol, self.protocol) def test_no_confchanged_event(self): conf = TorConfig(self.protocol) self.protocol.add_event_listener = Mock(side_effect=RuntimeError) d = defer.Deferred() self.protocol.get_info_raw = Mock(return_value=d) conf.bootstrap() # this should log a message, do we really care what? def test_attribute_access(self): conf = TorConfig(self.protocol) self.assertNotIn('_slutty_', conf.__dict__) self.assertNotIn('foo', conf) class LogTests(unittest.TestCase): def setUp(self): self.protocol = FakeControlProtocol([]) self.protocol.answers.append('config/names=\nLog LineList''') self.protocol.answers.append( {'Log': 'notice file /var/log/tor/notices.log'} ) def test_log_set(self): conf = TorConfig(self.protocol) conf.log.append('info file /tmp/foo.log') self.assertTrue(conf.needs_save()) conf.save() self.assertEqual( self.protocol.sets[0], ('Log', 'notice file /var/log/tor/notices.log') ) self.assertEqual( self.protocol.sets[1], ('Log', 'info file /tmp/foo.log') ) def test_log_set_capital(self): conf = TorConfig(self.protocol) conf.Log.append('info file /tmp/foo.log') self.assertTrue(conf.needs_save()) conf.save() self.assertEqual( self.protocol.sets[0], ('Log', 'notice file /var/log/tor/notices.log') ) self.assertEqual( self.protocol.sets[1], ('Log', 'info file /tmp/foo.log') ) def test_log_set_index(self): conf = TorConfig(self.protocol) conf.log[0] = 'info file /tmp/foo.log' self.assertTrue(conf.needs_save()) conf.save() self.assertEqual( self.protocol.sets[0], ('Log', 'info file /tmp/foo.log') ) def test_log_set_slice(self): conf = TorConfig(self.protocol) conf.log[0:1] = ['info file /tmp/foo.log'] self.assertTrue(conf.needs_save()) conf.save() self.assertEqual(1, len(self.protocol.sets)) self.assertEqual( self.protocol.sets[0], ('Log', 'info file /tmp/foo.log') ) def test_log_set_pop(self): conf = TorConfig(self.protocol) self.assertEqual(len(conf.log), 1) conf.log.pop() self.assertTrue(conf.needs_save()) conf.save() self.assertEqual(len(conf.log), 0) self.assertEqual(len(self.protocol.sets), 0) def test_log_set_extend(self): conf = TorConfig(self.protocol) self.assertEqual(len(conf.log), 1) conf.log.extend(['info file /tmp/foo']) 
self.assertTrue(conf.needs_save()) conf.save() self.assertEqual(len(conf.log), 2) self.assertEqual(len(self.protocol.sets), 2) self.assertEqual( self.protocol.sets[0], ('Log', 'notice file /var/log/tor/notices.log') ) self.assertEqual( self.protocol.sets[1], ('Log', 'info file /tmp/foo') ) def test_log_set_insert(self): conf = TorConfig(self.protocol) self.assertEqual(len(conf.log), 1) conf.log.insert(0, 'info file /tmp/foo') self.assertTrue(conf.needs_save()) conf.save() self.assertEqual(len(conf.log), 2) self.assertEqual(len(self.protocol.sets), 2) self.assertEqual( self.protocol.sets[1], ('Log', 'notice file /var/log/tor/notices.log') ) self.assertEqual( self.protocol.sets[0], ('Log', 'info file /tmp/foo') ) def test_log_set_remove(self): conf = TorConfig(self.protocol) self.assertEqual(len(conf.log), 1) conf.log.remove('notice file /var/log/tor/notices.log') self.assertTrue(conf.needs_save()) conf.save() self.assertEqual(len(conf.log), 0) self.assertEqual(len(self.protocol.sets), 0) def test_log_set_multiple(self): conf = TorConfig(self.protocol) self.assertEqual(len(conf.log), 1) conf.log[0] = 'foo' self.assertTrue(conf.needs_save()) conf.log[0] = 'heavy' conf.log[0] = 'round' conf.save() self.assertEqual(len(self.protocol.sets), 1) self.assertEqual(self.protocol.sets[0], ('Log', 'round')) def test_set_wrong_object(self): conf = TorConfig(self.protocol) self.assertTrue(conf.post_bootstrap.called) try: conf.log = ('this', 'is', 'a', 'tuple') self.fail() except ValueError, e: self.assertTrue('Not valid' in str(e)) class EventTests(unittest.TestCase): def test_conf_changed(self): control = FakeControlProtocol([]) config = TorConfig(control) self.assertTrue('CONF_CHANGED' in control.events) control.events['CONF_CHANGED']('Foo=bar\nBar') self.assertEqual(len(config.config), 2) self.assertEqual(config.Foo, 'bar') self.assertEqual(config.Bar, DEFAULT_VALUE) class CreateTorrcTests(unittest.TestCase): def test_create_torrc(self): config = TorConfig() config.SocksPort = 1234 config.hiddenservices = [ HiddenService(config, '/some/dir', '80 127.0.0.1:1234', 'auth', 2, True) ] config.Log = ['80 127.0.0.1:80', '90 127.0.0.1:90'] config.save() torrc = config.create_torrc() lines = torrc.split('\n') lines.sort() torrc = '\n'.join(lines).strip() self.assertEqual(torrc, '''HiddenServiceAuthorizeClient auth HiddenServiceDir /some/dir HiddenServicePort 80 127.0.0.1:1234 HiddenServiceVersion 2 Log 80 127.0.0.1:80 Log 90 127.0.0.1:90 SocksPort 1234''') class HiddenServiceTests(unittest.TestCase): def setUp(self): self.protocol = FakeControlProtocol([]) self.protocol.answers.append('''config/names= HiddenServiceOptions Virtual HiddenServiceVersion Dependant HiddenServiceDirGroupReadable Dependant HiddenServiceAuthorizeClient Dependant''') @defer.inlineCallbacks def test_options_hidden(self): self.protocol.answers.append( 'HiddenServiceDir=/fake/path\nHiddenServicePort=80 ' '127.0.0.1:1234\nHiddenServiceDirGroupReadable=1\n' ) conf = TorConfig(self.protocol) yield conf.post_bootstrap self.assertTrue(conf.post_bootstrap.called) self.assertTrue('HiddenServiceOptions' not in conf.config) self.assertTrue('HiddenServices' in conf.config) self.assertEqual(len(conf.HiddenServices), 1) self.assertTrue(not conf.needs_save()) conf.hiddenservices.append( HiddenService(conf, '/some/dir', '80 127.0.0.1:2345', 'auth', 2, True) ) conf.hiddenservices[0].ports.append('443 127.0.0.1:443') self.assertTrue(conf.needs_save()) conf.save() self.assertEqual(len(self.protocol.sets), 9) self.assertEqual(self.protocol.sets[0], 
('HiddenServiceDir', '/fake/path')) self.assertEqual(self.protocol.sets[1], ('HiddenServiceDirGroupReadable', '1')) self.assertEqual(self.protocol.sets[2], ('HiddenServicePort', '80 127.0.0.1:1234')) self.assertEqual(self.protocol.sets[3], ('HiddenServicePort', '443 127.0.0.1:443')) self.assertEqual(self.protocol.sets[4], ('HiddenServiceDir', '/some/dir')) self.assertEqual(self.protocol.sets[5], ('HiddenServiceDirGroupReadable', '1')) self.assertEqual(self.protocol.sets[6], ('HiddenServicePort', '80 127.0.0.1:2345')) self.assertEqual(self.protocol.sets[7], ('HiddenServiceVersion', '2')) self.assertEqual(self.protocol.sets[8], ('HiddenServiceAuthorizeClient', 'auth')) def test_save_no_protocol(self): conf = TorConfig() conf.HiddenServices = [HiddenService(conf, '/fake/path', ['80 127.0.0.1:1234'])] conf.save() def test_two_hidden_services_before_save(self): conf = TorConfig() conf.HiddenServices = [HiddenService(conf, '/fake/path', ['80 127.0.0.1:1234'])] conf.HiddenServices.append(HiddenService(conf, '/fake/path/two', ['1234 127.0.0.1:1234'])) conf.save() self.assertEqual(2, len(conf.HiddenServices)) def test_onion_keys(self): # FIXME test without crapping on filesystem self.protocol.answers.append('HiddenServiceDir=/fake/path\n') d = tempfile.mkdtemp() try: with open(os.path.join(d, 'hostname'), 'w') as f: f.write('public') with open(os.path.join(d, 'private_key'), 'w') as f: f.write('private') with open(os.path.join(d, 'client_keys'), 'w') as f: f.write('client-name hungry\ndescriptor-cookie omnomnom\n') conf = TorConfig(self.protocol) hs = HiddenService(conf, d, []) self.assertEqual(hs.hostname, 'public') self.assertEqual(hs.private_key, 'private') self.assertEqual(len(hs.client_keys), 1) self.assertEqual(hs.client_keys[0].name, 'hungry') self.assertEqual(hs.client_keys[0].cookie, 'omnomnom') self.assertEqual(hs.client_keys[0].key, None) finally: shutil.rmtree(d, ignore_errors=True) def test_modify_hidden_service(self): self.protocol.answers.append('HiddenServiceDir=/fake/path\nHiddenServicePort=80 127.0.0.1:1234\n') conf = TorConfig(self.protocol) conf.hiddenservices[0].version = 3 self.assertTrue(conf.needs_save()) def test_add_hidden_service_to_empty_config(self): conf = TorConfig() h = HiddenService(conf, '/fake/path', ['80 127.0.0.1:1234'], '', 3) conf.HiddenServices.append(h) self.assertEqual(len(conf.hiddenservices), 1) self.assertEqual(h, conf.hiddenservices[0]) self.assertTrue(conf.needs_save()) def test_multiple_append(self): conf = TorConfig() h0 = HiddenService(conf, '/fake/path', ['80 127.0.0.1:1234'], '', 3) h1 = HiddenService(conf, '/fake/path', ['90 127.0.0.1:4321'], '', 3) h2 = HiddenService(conf, '/fake/path', ['90 127.0.0.1:5432'], '', 3, True) conf.hiddenservices = [h0] conf.hiddenservices.append(h1) conf.hiddenservices.append(h2) self.assertEqual(len(conf.hiddenservices), 3) self.assertEqual(h0, conf.hiddenservices[0]) self.assertEqual(h1, conf.hiddenservices[1]) self.assertEqual(h2, conf.hiddenservices[2]) self.assertTrue(conf.needs_save()) def test_multiple_startup_services(self): conf = TorConfig(FakeControlProtocol(['config/names='])) conf._setup_hidden_services('''HiddenServiceDir=/fake/path HiddenServicePort=80 127.0.0.1:1234 HiddenServiceVersion=2 HiddenServiceAuthorizeClient=basic HiddenServiceDir=/some/other/fake/path HiddenServicePort=80 127.0.0.1:1234 HiddenServicePort=90 127.0.0.1:2345''') self.assertEqual(len(conf.hiddenservices), 2) self.assertEqual(conf.hiddenservices[0].dir, '/fake/path') self.assertEqual(conf.hiddenservices[0].version, 2) 
self.assertEqual(len(conf.hiddenservices[0].authorize_client), 1) self.assertEqual(conf.hiddenservices[0].authorize_client[0], 'basic') self.assertEqual(len(conf.hiddenservices[0].ports), 1) self.assertEqual(conf.hiddenservices[0].ports[0], '80 127.0.0.1:1234') self.assertEqual(conf.hiddenservices[1].dir, '/some/other/fake/path') self.assertEqual(len(conf.hiddenservices[1].ports), 2) self.assertEqual(conf.hiddenservices[1].ports[0], '80 127.0.0.1:1234') self.assertEqual(conf.hiddenservices[1].ports[1], '90 127.0.0.1:2345') def test_hidden_service_parse_error(self): conf = TorConfig(FakeControlProtocol(['config/names='])) try: conf._setup_hidden_services('''FakeHiddenServiceKey=foo''') self.fail() except RuntimeError, e: self.assertTrue('parse' in str(e)) def test_hidden_service_directory_absolute_path(self): conf = TorConfig(FakeControlProtocol(['config/names='])) conf._setup_hidden_services('HiddenServiceDir=/fake/path/../path') self.assertEqual(len(self.flushWarnings()), 1) def test_hidden_service_same_directory(self): conf = TorConfig(FakeControlProtocol(['config/names='])) servicelines = '''HiddenServiceDir=/fake/path HiddenServiceDir=/fake/path''' self.assertRaises(RuntimeError, conf._setup_hidden_services, servicelines) conf = TorConfig() conf.HiddenServices = [HiddenService(conf, '/fake/path', ['80 127.0.0.1:1234'])] conf.HiddenServices.append(HiddenService(conf, '/fake/path', ['80 127.0.0.1:1234'])) self.assertTrue(conf.needs_save()) self.assertRaises(RuntimeError, conf.save) conf = TorConfig() conf.HiddenServices = [HiddenService(conf, '/fake/path', ['80 127.0.0.1:1234'])] conf.HiddenServices.append(HiddenService(conf, '/fake/path/two', ['80 127.0.0.1:1234'])) self.assertTrue(conf.needs_save()) conf.save() conf.hiddenservices[1].dir = '/fake/path' self.assertTrue(conf.needs_save()) self.assertRaises(RuntimeError, conf.save) def test_multiple_modify_hidden_service(self): self.protocol.answers.append('HiddenServiceDir=/fake/path\nHiddenServicePort=80 127.0.0.1:1234\n') conf = TorConfig(self.protocol) self.assertTrue(self.protocol.post_bootstrap.called) self.assertTrue(conf.post_bootstrap is None or conf.post_bootstrap.called) self.assertEqual(len(conf.hiddenservices), 1) self.assertTrue(conf.hiddenservices[0].conf) conf.hiddenservices[0].version = 3 self.assertTrue(conf.needs_save()) conf.hiddenservices[0].version = 4 conf.hiddenservices[0].version = 5 self.assertEqual(conf.hiddenservices[0].version, 5) conf.save() self.assertEqual(len(self.protocol.sets), 3) self.assertEqual(self.protocol.sets[0], ('HiddenServiceDir', '/fake/path')) self.assertEqual(self.protocol.sets[1], ('HiddenServicePort', '80 127.0.0.1:1234')) self.assertEqual(self.protocol.sets[2], ('HiddenServiceVersion', '5')) def test_set_save_modify(self): self.protocol.answers.append('') conf = TorConfig(self.protocol) conf.hiddenservices = [HiddenService(conf, '/fake/path', ['80 127.0.0.1:1234'], '', 3)] self.assertTrue(conf.needs_save()) conf.save() self.assertEqual(len(conf.hiddenservices), 1) self.assertEqual(conf.hiddenservices[0].dir, '/fake/path') self.assertEqual(conf.hiddenservices[0].version, 3) self.assertEqual(0, len(conf.hiddenservices[0].authorize_client)) conf.hiddenservices[0].ports = ['123 127.0.0.1:4321'] conf.save() self.assertTrue(not conf.needs_save()) conf.hiddenservices[0].ports.append('90 127.0.0.1:2345') self.assertTrue(conf.needs_save()) class FakeReactor(task.Clock): implements(IReactorCore) def __init__(self, test, trans, on_protocol): super(FakeReactor, self).__init__() self.test = test 
self.transport = trans self.on_protocol = on_protocol def spawnProcess(self, processprotocol, bin, args, env, path, uid=None, gid=None, usePTY=None, childFDs=None): self.protocol = processprotocol self.protocol.makeConnection(self.transport) self.transport.process_protocol = processprotocol self.on_protocol(self.protocol) return self.transport def addSystemEventTrigger(self, *args): self.test.assertEqual(args[0], 'before') self.test.assertEqual(args[1], 'shutdown') # we know this is just for the temporary file cleanup, so we # nuke it right away to avoid polluting /tmp by calling the # callback now. args[2]() def removeSystemEventTrigger(self, id): pass class FakeProcessTransport(proto_helpers.StringTransportWithDisconnection): pid = -1 def signalProcess(self, signame): self.process_protocol.processEnded( Failure(error.ProcessTerminated(signal=signame)) ) def closeStdin(self): self.protocol.dataReceived('250 OK\r\n') self.protocol.dataReceived('250 OK\r\n') self.protocol.dataReceived('250 OK\r\n') self.protocol.dataReceived( '650 STATUS_CLIENT NOTICE BOOTSTRAP PROGRESS=90 ' 'TAG=circuit_create SUMMARY="Establishing a Tor circuit"\r\n' ) self.protocol.dataReceived( '650 STATUS_CLIENT NOTICE BOOTSTRAP PROGRESS=100 ' 'TAG=done SUMMARY="Done"\r\n' ) class FakeProcessTransportNeverBootstraps(FakeProcessTransport): pid = -1 def closeStdin(self): self.protocol.dataReceived('250 OK\r\n') self.protocol.dataReceived('250 OK\r\n') self.protocol.dataReceived('250 OK\r\n') self.protocol.dataReceived( '650 STATUS_CLIENT NOTICE BOOTSTRAP PROGRESS=90 TAG=circuit_create ' 'SUMMARY="Establishing a Tor circuit"\r\n') class FakeProcessTransportNoProtocol(FakeProcessTransport): def closeStdin(self): pass class LaunchTorTests(unittest.TestCase): def setUp(self): self.protocol = TorControlProtocol() self.protocol.connectionMade = lambda: None self.transport = proto_helpers.StringTransport() self.protocol.makeConnection(self.transport) self.clock = task.Clock() def setup_complete_with_timer(self, proto): proto._check_timeout.stop() proto.checkTimeout() def setup_complete_no_errors(self, proto, config, stdout, stderr): self.assertEqual("Bootstrapped 100%\n", stdout.getvalue()) self.assertEqual("", stderr.getvalue()) todel = proto.to_delete self.assertTrue(len(todel) > 0) # ...because we know it's a TorProcessProtocol :/ proto.cleanup() self.assertEqual(len(proto.to_delete), 0) for f in todel: self.assertTrue(not os.path.exists(f)) self.assertEqual(proto._timeout_delayed_call, None) # make sure we set up the config to track the created tor # protocol connection self.assertEquals(config.protocol, proto.tor_protocol) def setup_complete_fails(self, proto, stdout, stderr): self.assertEqual("Bootstrapped 90%\n", stdout.getvalue()) self.assertEqual("", stderr.getvalue()) todel = proto.to_delete self.assertTrue(len(todel) > 0) # the "12" is just arbitrary, we check it later in the error-message proto.processEnded( Failure(error.ProcessTerminated(12, None, 'statusFIXME')) ) self.assertEqual(1, len(self.flushLoggedErrors(RuntimeError))) self.assertEqual(len(proto.to_delete), 0) for f in todel: self.assertTrue(not os.path.exists(f)) return None @patch('txtorcon.torconfig.os.geteuid') def test_basic_launch(self, geteuid): # pretend we're root to exercise the "maybe chown data dir" codepath geteuid.return_value = 0 config = TorConfig() config.ORPort = 1234 config.SOCKSPort = 9999 config.User = getuser() def connector(proto, trans): proto._set_valid_events('STATUS_CLIENT') proto.makeConnection(trans) 
proto.post_bootstrap.callback(proto) return proto.post_bootstrap class OnProgress: def __init__(self, test, expected): self.test = test self.expected = expected def __call__(self, percent, tag, summary): self.test.assertEqual( self.expected[0], (percent, tag, summary) ) self.expected = self.expected[1:] self.test.assertTrue('"' not in summary) self.test.assertTrue(percent >= 0 and percent <= 100) def on_protocol(proto): proto.outReceived('Bootstrapped 100%\n') proto.progress = OnProgress( self, [ (90, 'circuit_create', 'Establishing a Tor circuit'), (100, 'done', 'Done'), ] ) trans = FakeProcessTransport() trans.protocol = self.protocol fakeout = StringIO() fakeerr = StringIO() creator = functools.partial(connector, self.protocol, self.transport) d = launch_tor( config, FakeReactor(self, trans, on_protocol), connection_creator=creator, tor_binary='/bin/echo', stdout=fakeout, stderr=fakeerr ) d.addCallback(self.setup_complete_no_errors, config, fakeout, fakeerr) return d def check_setup_failure(self, fail): self.assertTrue("with error-code 12" in fail.getErrorMessage()) # cancel the errback chain, we wanted this return None def test_launch_tor_fails(self): config = TorConfig() config.OrPort = 1234 config.SocksPort = 9999 def connector(proto, trans): proto._set_valid_events('STATUS_CLIENT') proto.makeConnection(trans) proto.post_bootstrap.callback(proto) return proto.post_bootstrap def on_protocol(proto): proto.outReceived('Bootstrapped 90%\n') trans = FakeProcessTransport() trans.protocol = self.protocol fakeout = StringIO() fakeerr = StringIO() creator = functools.partial(connector, self.protocol, self.transport) d = launch_tor( config, FakeReactor(self, trans, on_protocol), connection_creator=creator, tor_binary='/bin/echo', stdout=fakeout, stderr=fakeerr ) d.addCallback(self.setup_complete_fails, fakeout, fakeerr) self.flushLoggedErrors(RuntimeError) return d def test_launch_with_timeout_no_ireactortime(self): config = TorConfig() return self.assertRaises( RuntimeError, launch_tor, config, None, timeout=5, tor_binary='/bin/echo' ) @patch('txtorcon.torconfig.sys') @patch('txtorcon.torconfig.pwd') @patch('txtorcon.torconfig.os.geteuid') @patch('txtorcon.torconfig.os.chown') def test_launch_root_changes_tmp_ownership(self, chown, euid, _pwd, _sys): _pwd.return_value = 1000 _sys.platform = 'linux2' euid.return_value = 0 config = TorConfig() config.User = 'chuffington' d = launch_tor(config, Mock(), tor_binary='/bin/echo') self.assertEqual(1, chown.call_count) @defer.inlineCallbacks def test_launch_timeout_exception(self): self.protocol = FakeControlProtocol([]) self.protocol.answers.append('''config/names= DataDirectory String ControlPort Port''') self.protocol.answers.append({'DataDirectory': 'foo'}) self.protocol.answers.append({'ControlPort': 0}) config = TorConfig(self.protocol) yield config.post_bootstrap config.DataDirectory = '/dev/null' trans = Mock() d = launch_tor( config, FakeReactor(self, trans, Mock()), tor_binary='/bin/echo' ) tpp = yield d tpp.transport = trans trans.signalProcess = Mock(side_effect=error.ProcessExitedAlready) trans.loseConnection = Mock() tpp.timeout_expired() self.assertTrue(tpp.transport.loseConnection.called) @defer.inlineCallbacks def test_launch_timeout_process_exits(self): # cover the "one more edge case" where we get a processEnded() # but we've already "done" a timeout. 
self.protocol = FakeControlProtocol([]) self.protocol.answers.append('''config/names= DataDirectory String ControlPort Port''') self.protocol.answers.append({'DataDirectory': 'foo'}) self.protocol.answers.append({'ControlPort': 0}) config = TorConfig(self.protocol) yield config.post_bootstrap config.DataDirectory = '/dev/null' trans = Mock() d = launch_tor( config, FakeReactor(self, trans, Mock()), tor_binary='/bin/echo' ) tpp = yield d tpp.timeout_expired() tpp.transport = trans trans.signalProcess = Mock() trans.loseConnection = Mock() status = Mock() status.value.exitCode = None self.assertTrue(tpp._did_timeout) tpp.processEnded(status) errs = self.flushLoggedErrors(RuntimeError) self.assertEqual(1, len(errs)) def test_launch_wrong_stdout(self): config = TorConfig() try: launch_tor(config, None, stdout=object(), tor_binary='/bin/echo') self.fail("Should have thrown an error") except RuntimeError: pass def test_launch_with_timeout(self): config = TorConfig() config.OrPort = 1234 config.SocksPort = 9999 timeout = 5 def connector(proto, trans): proto._set_valid_events('STATUS_CLIENT') proto.makeConnection(trans) proto.post_bootstrap.callback(proto) return proto.post_bootstrap class OnProgress: def __init__(self, test, expected): self.test = test self.expected = expected def __call__(self, percent, tag, summary): self.test.assertEqual( self.expected[0], (percent, tag, summary) ) self.expected = self.expected[1:] self.test.assertTrue('"' not in summary) self.test.assertTrue(percent >= 0 and percent <= 100) def on_protocol(proto): proto.outReceived('Bootstrapped 100%\n') trans = FakeProcessTransportNeverBootstraps() trans.protocol = self.protocol creator = functools.partial(connector, self.protocol, self.transport) react = FakeReactor(self, trans, on_protocol) d = launch_tor(config, react, connection_creator=creator, timeout=timeout, tor_binary='/bin/echo') # FakeReactor is a task.Clock subclass and +1 just to be sure react.advance(timeout + 1) self.assertTrue(d.called) self.assertTrue( d.result.getErrorMessage().strip().endswith('Tor was killed (TERM).') ) self.flushLoggedErrors(RuntimeError) return self.assertFailure(d, RuntimeError) def test_launch_with_timeout_that_doesnt_expire(self): config = TorConfig() config.OrPort = 1234 config.SocksPort = 9999 timeout = 5 def connector(proto, trans): proto._set_valid_events('STATUS_CLIENT') proto.makeConnection(trans) proto.post_bootstrap.callback(proto) return proto.post_bootstrap class OnProgress: def __init__(self, test, expected): self.test = test self.expected = expected def __call__(self, percent, tag, summary): self.test.assertEqual( self.expected[0], (percent, tag, summary) ) self.expected = self.expected[1:] self.test.assertTrue('"' not in summary) self.test.assertTrue(percent >= 0 and percent <= 100) def on_protocol(proto): proto.outReceived('Bootstrapped 100%\n') trans = FakeProcessTransport() trans.protocol = self.protocol creator = functools.partial(connector, self.protocol, self.transport) react = FakeReactor(self, trans, on_protocol) d = launch_tor(config, react, connection_creator=creator, timeout=timeout, tor_binary='/bin/echo') # FakeReactor is a task.Clock subclass and +1 just to be sure react.advance(timeout + 1) self.assertTrue(d.called) self.assertTrue(d.result.tor_protocol == self.protocol) def setup_fails_stderr(self, fail, stdout, stderr): self.assertEqual('', stdout.getvalue()) self.assertEqual('Something went horribly wrong!\n', stderr.getvalue()) self.assertTrue( 'Something went horribly wrong!' 
in fail.getErrorMessage() ) # cancel the errback chain, we wanted this return None def test_tor_produces_stderr_output(self): config = TorConfig() config.OrPort = 1234 config.SocksPort = 9999 def connector(proto, trans): proto._set_valid_events('STATUS_CLIENT') proto.makeConnection(trans) proto.post_bootstrap.callback(proto) return proto.post_bootstrap def on_protocol(proto): proto.errReceived('Something went horribly wrong!\n') trans = FakeProcessTransport() trans.protocol = self.protocol fakeout = StringIO() fakeerr = StringIO() creator = functools.partial(connector, self.protocol, self.transport) d = launch_tor(config, FakeReactor(self, trans, on_protocol), connection_creator=creator, tor_binary='/bin/echo', stdout=fakeout, stderr=fakeerr) d.addCallback(self.fail) # should't get callback d.addErrback(self.setup_fails_stderr, fakeout, fakeerr) self.assertFalse(self.protocol.on_disconnect) return d def test_tor_connection_fails(self): """ We fail to connect once, and then successfully connect -- testing whether we're retrying properly on each Bootstrapped line from stdout. """ config = TorConfig() config.OrPort = 1234 config.SocksPort = 9999 class Connector: count = 0 def __call__(self, proto, trans): self.count += 1 if self.count < 2: return defer.fail( error.CannotListenError(None, None, None) ) proto._set_valid_events('STATUS_CLIENT') proto.makeConnection(trans) proto.post_bootstrap.callback(proto) return proto.post_bootstrap def on_protocol(proto): proto.outReceived('Bootstrapped 90%\n') trans = FakeProcessTransport() trans.protocol = self.protocol creator = functools.partial(Connector(), self.protocol, self.transport) d = launch_tor( config, FakeReactor(self, trans, on_protocol), connection_creator=creator, tor_binary='/bin/echo' ) d.addCallback(self.setup_complete_fails) return self.assertFailure(d, Exception) def test_tor_connection_user_data_dir(self): """ Test that we don't delete a user-supplied data directory. """ config = TorConfig() config.OrPort = 1234 class Connector: def __call__(self, proto, trans): proto._set_valid_events('STATUS_CLIENT') proto.makeConnection(trans) proto.post_bootstrap.callback(proto) return proto.post_bootstrap def on_protocol(proto): proto.outReceived('Bootstrapped 90%\n') my_dir = tempfile.mkdtemp(prefix='tortmp') config.DataDirectory = my_dir trans = FakeProcessTransport() trans.protocol = self.protocol creator = functools.partial(Connector(), self.protocol, self.transport) d = launch_tor( config, FakeReactor(self, trans, on_protocol), connection_creator=creator, tor_binary='/bin/echo' ) def still_have_data_dir(proto, tester): proto.cleanup() # FIXME? 
not really unit-testy as this is sort of internal function tester.assertTrue(os.path.exists(my_dir)) delete_file_or_tree(my_dir) d.addCallback(still_have_data_dir, self) d.addErrback(self.fail) return d def test_tor_connection_user_control_port(self): """ Confirm we use a user-supplied control-port properly """ config = TorConfig() config.OrPort = 1234 config.ControlPort = 4321 class Connector: def __call__(self, proto, trans): proto._set_valid_events('STATUS_CLIENT') proto.makeConnection(trans) proto.post_bootstrap.callback(proto) return proto.post_bootstrap def on_protocol(proto): proto.outReceived('Bootstrapped 90%\n') proto.outReceived('Bootstrapped 100%\n') trans = FakeProcessTransport() trans.protocol = self.protocol creator = functools.partial(Connector(), self.protocol, self.transport) d = launch_tor( config, FakeReactor(self, trans, on_protocol), connection_creator=creator, tor_binary='/bin/echo' ) def check_control_port(proto, tester): # we just want to ensure launch_tor() didn't mess with # the controlport we set tester.assertEquals(config.ControlPort, 4321) d.addCallback(check_control_port, self) d.addErrback(self.fail) return d def test_tor_connection_default_control_port(self): """ Confirm a default control-port is set if not user-supplied. """ config = TorConfig() class Connector: def __call__(self, proto, trans): proto._set_valid_events('STATUS_CLIENT') proto.makeConnection(trans) proto.post_bootstrap.callback(proto) return proto.post_bootstrap def on_protocol(proto): proto.outReceived('Bootstrapped 90%\n') proto.outReceived('Bootstrapped 100%\n') trans = FakeProcessTransport() trans.protocol = self.protocol creator = functools.partial(Connector(), self.protocol, self.transport) d = launch_tor( config, FakeReactor(self, trans, on_protocol), connection_creator=creator, tor_binary='/bin/echo' ) def check_control_port(proto, tester): # ensure ControlPort was set to a default value tester.assertEquals(config.ControlPort, 9052) d.addCallback(check_control_port, self) d.addErrback(self.fail) return d def test_progress_updates(self): self.got_progress = False def confirm_progress(p, t, s): self.assertEqual(p, 10) self.assertEqual(t, 'tag') self.assertEqual(s, 'summary') self.got_progress = True process = TorProcessProtocol(None, confirm_progress) process.progress(10, 'tag', 'summary') self.assertTrue(self.got_progress) def test_status_updates(self): process = TorProcessProtocol(None) process.status_client("NOTICE CONSENSUS_ARRIVED") def test_tor_launch_success_then_shutdown(self): """ There was an error where we double-callbacked a deferred, i.e. success and then shutdown. This repeats it. """ process = TorProcessProtocol(None) process.status_client( 'STATUS_CLIENT BOOTSTRAP PROGRESS=100 TAG=foo SUMMARY=cabbage' ) self.assertEqual(None, process.connected_cb) class Value(object): exitCode = 123 class Status(object): value = Value() process.processEnded(Status()) self.assertEquals(len(self.flushLoggedErrors(RuntimeError)), 1) def test_launch_tor_no_control_port(self): ''' See Issue #80. This allows you to launch tor with a TorConfig with ControlPort=0 in case you don't want a control connection at all. In this case you get back a TorProcessProtocol and you own both pieces. (i.e. you have to kill it yourself). 
''' config = TorConfig() config.ControlPort = 0 trans = FakeProcessTransportNoProtocol() trans.protocol = self.protocol def creator(*args, **kw): print "Bad: connection creator called" self.fail() def on_protocol(proto): self.process_proto = proto pp = launch_tor(config, FakeReactor(self, trans, on_protocol), connection_creator=creator, tor_binary='/bin/echo') self.assertTrue(pp.called) self.assertEqual(pp.result, self.process_proto) return pp class IteratorTests(unittest.TestCase): def test_iterate_torconfig(self): cfg = TorConfig() cfg.FooBar = 'quux' cfg.save() cfg.Quux = 'blimblam' keys = sorted([k for k in cfg]) self.assertEqual(['FooBar', 'Quux'], keys) class ErrorTests(unittest.TestCase): @patch('txtorcon.torconfig.find_tor_binary') def test_no_tor_binary(self, ftb): """FIXME: do I really need all this crap in here?""" self.transport = proto_helpers.StringTransport() config = TorConfig() d = None class Connector: def __call__(self, proto, trans): proto._set_valid_events('STATUS_CLIENT') proto.makeConnection(trans) proto.post_bootstrap.callback(proto) return proto.post_bootstrap self.protocol = FakeControlProtocol([]) torconfig.find_tor_binary = lambda: None trans = FakeProcessTransport() trans.protocol = self.protocol creator = functools.partial(Connector(), self.protocol, self.transport) try: d = launch_tor( config, FakeReactor(self, trans, lambda x: None), connection_creator=creator ) self.fail() except TorNotFound: pass # success! return d # the RSA keys have been shortened below for readability keydata = '''client-name bar descriptor-cookie O4rQyZ+IJr2PNHUdeXi0nA== client-key -----BEGIN RSA PRIVATE KEY----- MIICXQIBAAKBgQC1R/bPGTWnpGJpNCfT1KIfFq1QEGHz4enKSEKUDkz1CSEPOMGS bV37dfqTuI4klsFvdUsR3NpYXLin9xRWvw1viKwAN0y8cv5totl4qMxO5i+zcfVh bJiNvVv2EjfEyQaZfAy2PUfp/tAPYZMsyfps2DptWyNR -----END RSA PRIVATE KEY----- client-name foo descriptor-cookie btlj4+RsWEkxigmlszInhQ== client-key -----BEGIN RSA PRIVATE KEY----- MIICXgIBAAKBgQDdLdHU1fbABtFutOFtpdWQdv/9qG1OAc0r1TfaBtkPSNcLezcx SThalIEnRFfejy0suOHmsqspruvn0FEflIEQvFWeXAPvXg== -----END RSA PRIVATE KEY----- client-name quux descriptor-cookie asdlkjasdlfkjalsdkfffj== ''' class HiddenServiceAuthTests(unittest.TestCase): def test_parse_client_keys(self): data = StringIO(keydata) clients = list(parse_client_keys(data)) self.assertEqual(3, len(clients)) self.assertEqual('bar', clients[0].name) self.assertEqual('O4rQyZ+IJr2PNHUdeXi0nA', clients[0].cookie) self.assertEqual('MIICXQIBAAKBgQC1R/bPGTWnpGJpNCfT1KIfFq1QEGHz4enKSEKUDkz1CSEPOMGSbV37dfqTuI4klsFvdUsR3NpYXLin9xRWvw1viKwAN0y8cv5totl4qMxO5i+zcfVhbJiNvVv2EjfEyQaZfAy2PUfp/tAPYZMsyfps2DptWyNR', clients[0].key) self.assertEqual('foo', clients[1].name) self.assertEqual('btlj4+RsWEkxigmlszInhQ', clients[1].cookie) self.assertEqual(clients[1].key, 'MIICXgIBAAKBgQDdLdHU1fbABtFutOFtpdWQdv/9qG1OAc0r1TfaBtkPSNcLezcxSThalIEnRFfejy0suOHmsqspruvn0FEflIEQvFWeXAPvXg==') self.assertEqual('quux', clients[2].name) self.assertEqual('asdlkjasdlfkjalsdkfffj', clients[2].cookie) self.assertEqual(None, clients[2].key) def test_parse_error(self): data = StringIO('client-name foo\nclient-name xxx\n') self.assertRaises( RuntimeError, parse_client_keys, data )
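# Usage sketch (not part of the test suite above): based on the assertions in
# HiddenServiceAuthTests, parse_client_keys() consumes a file-like object of
# "client-name" / "descriptor-cookie" / "client-key" blocks and yields objects
# exposing .name, .cookie and .key (key is None when no key block is present):
#
#   from StringIO import StringIO
#   from txtorcon.torconfig import parse_client_keys
#
#   for client in parse_client_keys(StringIO(keydata)):
#       print client.name, client.cookie, client.key is None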
# Copyright (c) 2017 Museum Victoria
# This software is released under the MIT license (see license.txt for details)

'''Projection Design Node'''

### Libraries required by this Node
import socket
import struct

### Parameters used by this Node
PORT = 1025

param_ipAddress = Parameter('{"title":"IP Address","schema":{"type":"string"}}')
param_debug = Parameter('{"title":"Debug Mode","schema":{"type":"boolean"}}')
param_f35 = Parameter('{"title":"F35 Support","schema":{"type":"boolean"}}')

inputs_f35 = ['VGA 1', 'VGA 2', 'DVI 1', '4', '5', '6', 'Component', 'HDMI 1', '9',
              'DVI 2', 'HDMI 2', 'Dual Head DVI', 'Dual Head HDMI', 'Dual Head XP2',
              'XP2 A', 'XP2 B']

### Local events this Node provides
local_event_Power = LocalEvent({'group': 'Power', 'schema': {'type': 'string', 'enum': ['On', 'Off', 'Partially On', 'Partially Off']}})
local_event_DesiredPower = LocalEvent({'group': 'Power', 'schema': {'type': 'string', 'enum': ['On', 'Off']}})
local_event_Error = LocalEvent('{ "title": "Error", "desc": "Error", "group": "General" }')
local_event_LampHours = LocalEvent({'group': 'Information', 'desc': 'The lamp hours for each lamp (comma separated)', 'order': next_seq(), 'schema': {'type': 'string'}})

### Main
def main(arg = None):
  if len((param_ipAddress or '').strip()) == 0:
    console.warn('No IP address configured; nothing to do')
    poller_lampHours.stop()
    status_timer.stop()
    timer_powerRetriever.stop()
    return

### Functions used by this Node
def send_cmd(cmd, arg=None):
  # open socket
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  sock.settimeout(10)
  try:
    sock.connect((param_ipAddress, PORT))
    packet = ":" + cmd
    if(arg):
      packet += " " + arg
    packet += "\r"
    sock.send(packet)
    if(param_debug):
      print 'Sent', packet
    data = sock.recv(1024)
    if(param_debug):
      print 'Received', data
    rcvpack = struct.unpack("<xxxxx4sx6sx", data[0:17])
    assert cmd in rcvpack[0]
    # if(arg): assert arg in rcvpack[1] # packet 1 contains the response code, not the sent arg
    return rcvpack[1]
  except socket.error, e:
    print "socket error: %s\n" % e
    local_event_Error.emit(e)
  except AssertionError, e:
    print "command error: %s\n" % e
    local_event_Error.emit(e)
  finally:
    lastReceive[0] = system_clock()
    sock.close()

### Local actions this Node provides
def local_action_Power(arg = None):
  '''{"title": "Power", "group": "Power", "desc": "Turns the projector on or off.", "schema": {"type": "string", "enum": ["On", "Off"]}}'''
  console.info('Power %s' % arg)
  local_event_DesiredPower.emit(arg)
  if arg == 'On':
    print 'Action PowerOn requested'
    send_cmd('POWR', '1')
  elif arg == 'Off':
    print 'Action PowerOff requested'
    send_cmd('POWR', '0')

def local_action_PowerOn(arg = None):
  """{"title":"PowerOn","desc":"PowerOn","group":"Power"}"""
  lookup_local_action('Power').call('On')

def local_action_PowerOff(arg = None):
  """{"title":"PowerOff","desc":"PowerOff","group":"Power"}"""
  lookup_local_action('Power').call('Off')

def local_action_GetPower(arg = None):
  """{"title":"GetPower","desc":"GetPower","group":"Information"}"""
  #print 'Action GetPower requested'
  result = send_cmd('POST', '?')
  if(result=='000005' or result=='000006'):
    print 'critical power off'
    local_event_Power.emit('Off')
  if(result=='000004'):
    print 'powering down'
  if(result=='000003'):
    #print 'power is on'
    local_event_Power.emit('On')
  if(result=='000002'):
    print 'powering up'
  if(result=='000000' or result=='000001'):
    #print 'power is off'
    local_event_Power.emit('Off')

def local_action_MuteOn(arg = None):
  """{"title":"MuteOn","desc":"MuteOn","group":"Picture"}"""
  print 'Action MuteOn requested'
send_cmd('PMUT', '1') def local_action_MuteOff(arg = None): """{"title":"MuteOff","desc":"MuteOff","group":"Picture"}""" print 'Action MuteOff requested' send_cmd('PMUT', '0') def local_action_GetLampHours(x = None): """{ "title": "GetLampHours", "desc": "GetLampHours", "group": "Information" }""" print 'Action GetLampHours requested' lampHours = list() # Add lamp hours. lampHours.append(send_cmd('LTR1', '?').strip("0")) # Lamp 1 lampHours.append(send_cmd('LTR2', '?').strip("0")) # Lamp 1 # Announce hours. local_event_LampHours.emit(', '.join(lampHours)) # <!--- device status DEFAULT_LAMPHOURUSE = 1800 param_warningThresholds = Parameter({'title': 'Warning thresholds', 'schema': {'type': 'object', 'properties': { 'lampUseHours': {'title': 'Lamp use (hours)', 'type': 'integer', 'hint': str(DEFAULT_LAMPHOURUSE), 'order': 1} }}}) lampUseHoursThreshold = DEFAULT_LAMPHOURUSE @after_main def init_lamp_hours_support(): global lampUseHoursThreshold lampUseHoursThreshold = (param_warningThresholds or {}).get('lampUseHours') or lampUseHoursThreshold # poll every 4 hours, 30s first time. poller_lampHours = Timer(lambda: lookup_local_action('GetLampHours').call(), 4*3600, 30) local_event_Status = LocalEvent({'title': 'Status', 'group': 'Status', 'order': 9990, "schema": { 'title': 'Status', 'type': 'object', 'properties': { 'level': {'title': 'Level', 'order': next_seq(), 'type': 'integer'}, 'message': {'title': 'Message', 'order': next_seq(), 'type': 'string'} } } }) lastReceive = [0] # roughly, the last contact local_event_LastContactDetect = LocalEvent({'group': 'Status', 'title': 'Last contact detect', 'schema': {'type': 'string'}}) def statusCheck(): lampUseHours = max([int(x) for x in (local_event_LampHours.getArg() or '0').split(',')]) diff = (system_clock() - lastReceive[0])/1000.0 # (in secs) now = date_now() # the list of status items as (category, statusInfo) tuples statuses = list() if diff > status_check_interval+15: previousContactValue = local_event_LastContactDetect.getArg() if previousContactValue == None: message = 'Always been missing.' else: previousContact = date_parse(previousContactValue) roughDiff = (now.getMillis() - previousContact.getMillis())/1000/60 if roughDiff < 60: # less than an hour, show just minutes message = 'Missing for approx. %s mins' % roughDiff elif roughDiff < (60*24): # less than a day, concise time is useful message = 'Missing since %s' % previousContact.toString('h:mm:ss a') else: # more than a day, concise date and time message = 'Missing since %s' % previousContact.toString('h:mm:ss a, E d-MMM') local_event_Status.emit({'level': 2, 'message': message}) # (is offline so no point checking any other statuses) return # check lamp hours if lampUseHours > lampUseHoursThreshold: statuses.append(('Lamp usage', {'level': 1, 'message': 'Lamp usage is %s hours which is %s above the replacement threshold of %s. It may need replacement.' 
% (lampUseHours, lampUseHours-lampUseHoursThreshold, lampUseHoursThreshold)})) # aggregate the statuses aggregateLevel = 0 aggregateMessage = 'OK' msgs = list() for key, status in statuses: level = status['level'] if level > 0: if level > aggregateLevel: aggregateLevel = level # raise the level del msgs[:] # clear message list because of a complete new (higher) level if level == aggregateLevel: # keep adding messages of equal status level msgs.append('%s: [%s]' % (key, status['message'])) # add the message if aggregateLevel > 0: aggregateMessage = ', '.join(msgs) local_event_Status.emit({'level': aggregateLevel, 'message': aggregateMessage}) local_event_LastContactDetect.emit(str(now)) status_check_interval = 12*60 # check every 12 minutes status_timer = Timer(statusCheck, status_check_interval, 30) # 10 minute power checker timer_powerRetriever = Timer(lambda: lookup_local_action('GetPower').call(), 10*60, 20) # device status ---> # <--- f35 support @after_main def init_F35_support(): if param_f35: # Set Stereo Mode def handler_get_stereo(arg): print 'Action GetStereoMode requested' result = send_cmd('TDSM', '?') if(result=='000002'): print '3D stereo mode: side by side' if(result=='000001'): print '3D stereo mode: frame sequential' if(result=='000000'): print '3D stereo mode: off' meta_get_stereo = { "title": "GetStereoMode", "desc": "GetStereoMode", "group": "Information" } Action('GetStereoMode', handler_get_stereo, meta_get_stereo) # Get Stereo Mode def handler_set_stereo(arg): print 'Action SetStereoMode requested' if arg == 'OFF': send_cmd('TDSM', '0') if arg == 'FRAME SEQUENTIAL': send_cmd('TDSM', '1') if arg == 'SIDE BY SIDE': send_cmd('TDSM', '2') meta_set_stereo = {"title":"Stereo Mode","required":'true',"schema":{"type":"string","enum": ['OFF', 'FRAME SEQUENTIAL', 'SIDE BY SIDE']}, 'group': '3D'} Action('SetStereoMode', handler_set_stereo, meta_set_stereo) # Dual Head On def handler_dualhead_on(arg): print 'Action DualHeadOn requested' send_cmd('DHED', '1') meta_dualhead_on = {"title":"DualHeadOn","desc":"DualHeadOn","group":"3D"} Action('DualHeadOn', handler_dualhead_on, meta_dualhead_on) # Dual Head Off def handler_dualhead_off(arg): print 'Action DualHeadOff requested' send_cmd('DHED', '0') meta_dualhead_off = {"title":"DualHeadOff","desc":"DualHeadOff","group":"3D"} Action('DualHeadOff', handler_dualhead_off, meta_dualhead_off) # Get Dual Head def handler_get_dualhead(arg): print 'Action GetDualHead requested' result = send_cmd('DHED', '?') if(result=='000001'): print 'dual head setup mode: on' if(result=='000000'): print 'dual head setup mode: off' meta_get_dualhead = {"title":"GetDualHead","desc":"GetDualHead","group":"Information"} Action('GetDualHead', handler_get_dualhead, meta_get_dualhead) # Set Input def handler_set_input(arg): print 'Action SetInput requested: '+arg if arg == 'HDMI 1': send_cmd('IABS', '8') if arg == 'HDMI 2': send_cmd('TDSM', '11') if arg == 'Dual Head HDMI': send_cmd('TDSM', '13') send_cmd(arg) meta_set_input = {"title":"Set input","desc":"SetInput","group":"Input","schema":{"type":"string", "title": "Source", "required":"true", "enum": ['HDMI 1', 'HDMI 2', 'Dual Head HDMI'] } } Action('SetInput', handler_set_input, meta_set_input) # Get Input def handler_get_input(arg): result = int(send_cmd('IABS', '?').strip("0")) print 'source: ', inputs_f35[result - 1] meta_get_input = {"title":"GetInput","desc":"GetInput","group":"Information"} Action('GetInput', handler_get_input, meta_get_input) # f35 support --->
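# Protocol framing notes (an illustrative summary of send_cmd() above, not
# taken from vendor documentation -- the example commands are assumptions
# drawn from the actions in this recipe):
#
#   request : ":" + 4-character command [+ " " + argument] + "\r"
#             e.g. ":POWR 1\r" (power on), ":LTR1 ?\r" (query lamp 1 hours)
#   reply   : fixed-width ASCII; struct.unpack("<xxxxx4sx6sx", data[0:17])
#             extracts the echoed 4-byte command and a 6-digit response code,
#             which callers such as GetPower compare against values like
#             '000003' (on) and '000000'/'000001' (off).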
""" MAP Client, a program to generate detailed musculoskeletal models for OpenSim. Copyright (C) 2012 University of Auckland This file is part of MAP Client. (http://launchpad.net/mapclient) MAP Client is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. MAP Client is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with MAP Client. If not, see <http://www.gnu.org/licenses/>.. """ import os from PySide2 import QtGui from PySide2.QtWidgets import QDialog, QAbstractItemView, QTableWidgetItem, QDoubleSpinBox, QLabel, QWidget from PySide2.QtGui import QIntValidator from PySide2.QtCore import Qt from PySide2.QtCore import QThread, Signal from mapclientplugins.fieldworklowerlimb2sidegenerationstep.ui_lowerlimbgenerationdialog import Ui_Dialog from gias3.mapclientpluginutilities.viewers import MayaviViewerObjectsContainer, MayaviViewerLandmark, \ MayaviViewerFieldworkModel, colours from mapclientplugins.fieldworklowerlimb2sidegenerationstep.landmarktablewidget import LandmarkComboBoxTable from mapclientplugins.fieldworklowerlimb2sidegenerationstep.llstep import validModelLandmarks import numpy as np import math os.environ['ETS_TOOLKIT'] = 'qt' class _ExecThread(QThread): update = Signal(tuple) callback = Signal(tuple) def __init__(self, func): QThread.__init__(self) self.func = func def run(self): # NOT USING CALLBACK since (probably due to threading) not all # bone models update in synchrony # output = self.func(self.callback) output = self.func() self.update.emit(output) class LowerLimbGenerationDialog(QDialog): """ Configure dialog to present the user with the options to configure this step. 
""" defaultColor = colours['bone'] objectTableHeaderColumns = {'Visible': 0} backgroundColour = (0.0, 0.0, 0.0) _modelRenderArgs = {} _modelDisc = [8, 8] _landmarkRenderArgs = {'mode': 'sphere', 'scale_factor': 20.0, 'color': (0, 1, 0)} _landmarkAdjRenderArgs = {'mode': 'sphere', 'scale_factor': 15.0, 'color': (1, 0, 0)} def __init__(self, data, done_execution, parent=None): """ Constructor """ QDialog.__init__(self, parent) self._ui = Ui_Dialog() self._ui.setupUi(self) self._scene = self._ui.MayaviScene.visualisation.scene self._scene.background = self.backgroundColour self.data = data self.data.regCallback = self._regCallback self.done_execution = done_execution self._lockManualRegUpdate = False self.selectedObjectName = None self._worker = _ExecThread(self.data.register) self._worker.update.connect(self._regUpdate) self._worker.callback.connect(self._regCallback) self.doubleSpinBox_pcs = [] self.labels = [] # FIX FROM HERE # self._initViewerObjects() self._setupGui() self._makeConnections() self._initialiseObjectTable() self._updateConfigs() self._refresh() def _initViewerObjects(self): self._objects = MayaviViewerObjectsContainer() for mn, m in list(self.data.LL.models.items()): self._objects.addObject(mn, MayaviViewerFieldworkModel(mn, m.gf, self._modelDisc, render_args=self._modelRenderArgs ) ) # 'none' is first elem in self._landmarkNames, so skip that for ln, lcoords in sorted(self.data.inputLandmarks.items()): print(('{} {}'.format(ln, lcoords))) self._objects.addObject(ln, MayaviViewerLandmark(ln, lcoords, render_args=self._landmarkRenderArgs ) ) for li, lcoords in enumerate(self.data.targetLandmarks): ln = self.data.targetLandmarkNames[li] + '_adjusted' print(('{} {} {}'.format(li, ln, lcoords))) self._objects.addObject(ln, MayaviViewerLandmark(ln, lcoords, render_args=self._landmarkAdjRenderArgs ) ) def _setupGui(self): # screenshot page self._ui.screenshotPixelXLineEdit.setValidator(QIntValidator()) self._ui.screenshotPixelYLineEdit.setValidator(QIntValidator()) # landmarks page valid_input_landmarks = sorted(self.data.inputLandmarks.keys()) self.landmarkTable = LandmarkComboBoxTable( validModelLandmarks, valid_input_landmarks, self._ui.tableWidgetLandmarks, ) # auto reg page self._ui.spinBox_pcsToFit.setMaximum(self.data.LL.SHAPEMODESMAX) for regmode in self.data.validRegistrationModes: self._ui.comboBox_regmode.addItem(regmode) # disable manual scaling adjustment, just use the shape model self._ui.doubleSpinBox_scaling.setEnabled(False) def _updateConfigs(self): # landmarks page self.landmarkTable.clearTable() for ml, il in sorted(self.data.config['landmarks'].items()): self.landmarkTable.addLandmark(ml, il) self._ui.doubleSpinBox_markerRadius.setValue(self.data.markerRadius) self._ui.doubleSpinBox_skinPad.setValue(self.data.skinPad) # manual reg page # This block could definitely be done better weights = self.data.LL.shape_mode_weights for index, weight in enumerate(weights): # Only update the spinBoxes that currently exist if index >= len(self.doubleSpinBox_pcs): break self.doubleSpinBox_pcs[index].setValue(weight) # self._ui.doubleSpinBox_scaling.setValue(self.data.T.uniformScaling) self._ui.doubleSpinBox_ptx.setValue(self.data.LL.pelvis_rigid[0]) self._ui.doubleSpinBox_pty.setValue(self.data.LL.pelvis_rigid[1]) self._ui.doubleSpinBox_ptz.setValue(self.data.LL.pelvis_rigid[2]) self._ui.doubleSpinBox_prx.setValue(np.rad2deg(self.data.LL.pelvis_rigid[3])) self._ui.doubleSpinBox_pry.setValue(np.rad2deg(self.data.LL.pelvis_rigid[4])) 
self._ui.doubleSpinBox_prz.setValue(np.rad2deg(self.data.LL.pelvis_rigid[5])) self._ui.doubleSpinBox_hiplx.setValue(np.rad2deg(self.data.LL.hip_rot_l[0])) self._ui.doubleSpinBox_hiply.setValue(np.rad2deg(self.data.LL.hip_rot_l[1])) self._ui.doubleSpinBox_hiplz.setValue(np.rad2deg(self.data.LL.hip_rot_l[2])) self._ui.doubleSpinBox_hiprx.setValue(np.rad2deg(self.data.LL.hip_rot_r[0])) self._ui.doubleSpinBox_hipry.setValue(np.rad2deg(self.data.LL.hip_rot_r[1])) self._ui.doubleSpinBox_hiprz.setValue(np.rad2deg(self.data.LL.hip_rot_r[2])) rot_l = self.data.LL.knee_rot_l rot_r = self.data.LL.knee_rot_r axis = ['x', 'y', 'z'] for index, rot_l_dim in enumerate(rot_l): getattr(self._ui, f'doubleSpinBox_kneel{axis[index]}').setValue(np.rad2deg(rot_l_dim)) for index, rot_r_dim in enumerate(rot_r): getattr(self._ui, f'doubleSpinBox_kneer{axis[index]}').setValue(np.rad2deg(rot_r_dim)) # auto reg page self._ui.comboBox_regmode.setCurrentIndex( self.data.validRegistrationModes.index( self.data.registrationMode, ) ) self._ui.spinBox_pcsToFit.setValue(self.data.nShapeModes) self._ui.spinBox_mWeight.setValue(self.data.mWeight) self._ui.checkBox_kneecorr.setChecked(bool(self.data.kneeCorr)) self._ui.checkBox_kneedof.setChecked(bool(self.data.kneeDOF)) def _updateNShapeModes(self): self.data.nShapeModes = self._ui.spinBox_pcsToFit.value() def _saveConfigs(self): # landmarks page self.data.config['landmarks'] = self.landmarkTable.getLandmarkPairs() self.data.markerRadius = self._ui.doubleSpinBox_markerRadius.value() self.data.skinPad = self._ui.doubleSpinBox_skinPad.value() # manual reg page self._saveLLParams() # auto reg page self.data.registrationMode = str(self._ui.comboBox_regmode.currentText()) self.data.nShapeModes = self._ui.spinBox_pcsToFit.value() self.data.mWeight = self._ui.spinBox_mWeight.value() self.data.kneeCorr = self._ui.checkBox_kneecorr.isChecked() self.data.kneeDOF = self._ui.checkBox_kneedof.isChecked() self._ui.checkBox_kneecorr.setChecked(bool(self.data.kneeCorr)) self._ui.checkBox_kneedof.setChecked(bool(self.data.kneeDOF)) def _saveLLParams(self): shape_mode_weights = np.array(self.data.LL.shape_mode_weights) for index in range(len(shape_mode_weights)): shape_mode_weights[index] = self.doubleSpinBox_pcs[index].value() pelvis_rigid = [ self._ui.doubleSpinBox_ptx.value(), self._ui.doubleSpinBox_pty.value(), self._ui.doubleSpinBox_ptz.value(), np.deg2rad(self._ui.doubleSpinBox_prx.value()), np.deg2rad(self._ui.doubleSpinBox_pry.value()), np.deg2rad(self._ui.doubleSpinBox_prz.value()), ] hip_rot_l = [ np.deg2rad(self._ui.doubleSpinBox_hiplx.value()), np.deg2rad(self._ui.doubleSpinBox_hiply.value()), np.deg2rad(self._ui.doubleSpinBox_hiplz.value()), ] hip_rot_r = [ np.deg2rad(self._ui.doubleSpinBox_hiprx.value()), np.deg2rad(self._ui.doubleSpinBox_hipry.value()), np.deg2rad(self._ui.doubleSpinBox_hiprz.value()), ] if self.data.kneeDOF: knee_rot_l = [ np.deg2rad(self._ui.doubleSpinBox_kneelx.value()), np.deg2rad(self._ui.doubleSpinBox_kneelz.value()), ] knee_rot_r = [ np.deg2rad(self._ui.doubleSpinBox_kneerx.value()), np.deg2rad(self._ui.doubleSpinBox_kneerz.value()), ] else: knee_rot_l = [np.deg2rad(self._ui.doubleSpinBox_kneelx.value()), ] knee_rot_r = [np.deg2rad(self._ui.doubleSpinBox_kneerx.value()), ] self.data.LL.update_all_models( shape_mode_weights[self.data.LL.shape_modes], self.data.LL.shape_modes, pelvis_rigid, hip_rot_l, hip_rot_r, knee_rot_l, knee_rot_r, ) def _makeConnections(self): self._ui.tableWidget.itemClicked.connect(self._tableItemClicked) 
self._ui.tableWidget.itemChanged.connect(self._visibleBoxChanged) self._ui.screenshotSaveButton.clicked.connect(self._saveScreenShot) # landmarks # self.landmarktablewidget.table.itemClicked.connect(self._saveConfigs) self.landmarkTable.table.itemChanged.connect(self._saveConfigs) self._ui.pushButton_addLandmark.clicked.connect(self.landmarkTable.addLandmark) self._ui.pushButton_removeLandmark.clicked.connect(self.landmarkTable.removeLandmark) # self._ui.doubleSpinBox_scaling.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_ptx.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_pty.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_ptz.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_prx.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_pry.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_prz.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_hiplx.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_hiply.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_hiplz.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_hiprx.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_hipry.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_hiprz.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_kneelx.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_kneely.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_kneelz.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_kneerx.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_kneery.valueChanged.connect(self._manualRegUpdate) self._ui.doubleSpinBox_kneerz.valueChanged.connect(self._manualRegUpdate) self._ui.pushButton_manual_reset.clicked.connect(self._reset) self._ui.pushButton_manual_accept.clicked.connect(self._accept) # auto reg self._ui.checkBox_kneecorr.stateChanged.connect(self._autoRegChanged) self._ui.checkBox_kneedof.stateChanged.connect(self._autoRegChanged) self._ui.pushButton_auto_reset.clicked.connect(self._reset) self._ui.pushButton_auto_accept.clicked.connect(self._accept) self._ui.pushButton_auto_abort.clicked.connect(self._abort) self._ui.pushButton_auto_reg.clicked.connect(self._autoReg) self._ui.spinBox_pcsToFit.valueChanged.connect(self._pcsToFitChanged) def _pcsToFitChanged(self): # delta represents a change in the number of PCs. 
(+) means we need to increase, (-) means we need to decrease delta = self._ui.spinBox_pcsToFit.value() - int(self._ui.gridLayout_3.count() / 2) # If delta is negative, delete the specified number of widgets from the layout if delta < 0: for _ in range(abs(delta)): self.doubleSpinBox_pcs.pop().setParent(None) self.labels.pop().setParent(None) # ??: self._updateNShapeModes() # ??: self._manualRegUpdate() # If delta is positive, add the specified number of widgets to the layout elif delta > 0: # Get the position of the last cell cell_row = math.ceil(self._ui.gridLayout_3.count() / 4) cell_column = 3 if ((self._ui.gridLayout_3.count() % 4) == 0) else 1 for it in range(abs(delta)): if cell_column == 3: cell_row += 1 cell_column = 1 else: cell_column = 3 widget = QDoubleSpinBox(self._ui.page) widget.setObjectName("doubleSpinBox_pc" + str(self._ui.gridLayout_3.count() + 1)) widget.setMinimum(-99.000000000000000) widget.setMaximum(99.000000000000000) widget.setSingleStep(0.100000000000000) self._ui.gridLayout_3.addWidget(widget, cell_row, cell_column, 1, 1) widget.valueChanged.connect(self._manualRegUpdate) self._updateNShapeModes() label = QLabel(self._ui.page) label.setObjectName("label_" + str(self._ui.gridLayout_3.count() + 1)) label.setAlignment(Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter) label.setText(str(int((self._ui.gridLayout_3.count() + 1) / 2))) self._ui.gridLayout_3.addWidget(label, cell_row, cell_column - 1, 1, 1) self.doubleSpinBox_pcs.append(widget) self.labels.append(label) def _initialiseObjectTable(self): self._ui.tableWidget.setRowCount(self._objects.getNumberOfObjects()) self._ui.tableWidget.verticalHeader().setVisible(False) self._ui.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers) self._ui.tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows) self._ui.tableWidget.setSelectionMode(QAbstractItemView.SingleSelection) # 'none' is first elem in self._landmarkNames, so skip that row = 0 # Add input landmarks for li, ln in enumerate(sorted(self.data.inputLandmarks.keys())): self._addObjectToTable(row, ln, self._objects.getObject(ln), checked=True) row += 1 # Add adjusted landmarks for ln in self.data.targetLandmarkNames: ln = ln + '_adjusted' self._addObjectToTable(row, ln, self._objects.getObject(ln), checked=True) row += 1 # Add bone models for mn in list(self.data.LL.models.keys()): self._addObjectToTable(row, mn, self._objects.getObject(mn), checked=True) row += 1 # self._modelRow = r self._ui.tableWidget.resizeColumnToContents(self.objectTableHeaderColumns['Visible']) def _addObjectToTable(self, row, name, obj, checked=True): type_name = obj.typeName print(('adding to table: %s (%s)' % (name, type_name))) table_item = QTableWidgetItem(name) if checked: table_item.setCheckState(Qt.Checked) else: table_item.setCheckState(Qt.Unchecked) self._ui.tableWidget.setItem(row, self.objectTableHeaderColumns['Visible'], table_item) # It seems all this method does is print which item has been clicked, which is already done in _visibleBoxChanged def _tableItemClicked(self): pass # selected_row = self._ui.tableWidget.currentRow() # # self.selectedObjectName = self._ui.tableWidget.item( # selected_row, # self.objectTableHeaderColumns['Visible'] # ).text() # # print(selected_row) # print(self.selectedObjectName) def _visibleBoxChanged(self, table_item): # Checked changed item is actually the checkbox if table_item.column() == self.objectTableHeaderColumns['Visible']: # Get visible status name = table_item.text() visible = table_item.checkState() == 
QtGui.Qt.CheckState.Checked # Toggle visibility obj = self._objects.getObject(name) if obj.sceneObject: obj.setVisibility(visible) else: obj.draw(self._scene) def _getSelectedObjectName(self): return self.selectedObjectName def _getSelectedScalarName(self): return 'none' def _drawObjects(self): for name in self._objects.getObjectNames(): self._objects.getObject(name).draw(self._scene) def _updateSceneModels(self): for mn in self.data.LL.models: mesh_obj = self._objects.getObject(mn) mesh_obj.updateGeometry(None, self._scene) def _manualRegUpdate(self): if not self._lockManualRegUpdate: self._saveConfigs() # self.data.updateLLModel() # LL is auto updated in _saveConfigs self._updateSceneModels() def _autoRegChanged(self): self.data.kneeCorr = self._ui.checkBox_kneecorr.isChecked() self.data.kneeDOF = self._ui.checkBox_kneedof.isChecked() def _regLockUI(self): self.landmarkTable.disable() self._ui.doubleSpinBox_markerRadius.setEnabled(False) self._ui.doubleSpinBox_skinPad.setEnabled(False) for index in range(len(self.doubleSpinBox_pcs)): self.doubleSpinBox_pcs[index].setEnabled(False) self._ui.doubleSpinBox_ptx.setEnabled(False) self._ui.doubleSpinBox_pty.setEnabled(False) self._ui.doubleSpinBox_ptz.setEnabled(False) self._ui.doubleSpinBox_prx.setEnabled(False) self._ui.doubleSpinBox_pry.setEnabled(False) self._ui.doubleSpinBox_prz.setEnabled(False) self._ui.doubleSpinBox_hiplx.setEnabled(False) self._ui.doubleSpinBox_hiply.setEnabled(False) self._ui.doubleSpinBox_hiplz.setEnabled(False) self._ui.doubleSpinBox_hiprx.setEnabled(False) self._ui.doubleSpinBox_hipry.setEnabled(False) self._ui.doubleSpinBox_hiprz.setEnabled(False) self._ui.doubleSpinBox_kneelx.setEnabled(False) self._ui.doubleSpinBox_kneely.setEnabled(False) self._ui.doubleSpinBox_kneelz.setEnabled(False) self._ui.doubleSpinBox_kneerx.setEnabled(False) self._ui.doubleSpinBox_kneery.setEnabled(False) self._ui.doubleSpinBox_kneerz.setEnabled(False) self._ui.pushButton_manual_accept.setEnabled(False) self._ui.pushButton_manual_reset.setEnabled(False) self._ui.comboBox_regmode.setEnabled(False) self._ui.spinBox_pcsToFit.setEnabled(False) self._ui.spinBox_mWeight.setEnabled(False) self._ui.checkBox_kneecorr.setEnabled(False) self._ui.checkBox_kneedof.setEnabled(False) self._ui.pushButton_auto_accept.setEnabled(False) self._ui.pushButton_auto_reset.setEnabled(False) self._ui.pushButton_auto_abort.setEnabled(False) self._ui.pushButton_auto_reg.setEnabled(False) def _regUnlockUI(self): self.landmarkTable.enable() self._ui.doubleSpinBox_markerRadius.setEnabled(True) self._ui.doubleSpinBox_skinPad.setEnabled(True) for index in range(len(self.doubleSpinBox_pcs)): self.doubleSpinBox_pcs[index].setEnabled(True) self._ui.doubleSpinBox_ptx.setEnabled(True) self._ui.doubleSpinBox_pty.setEnabled(True) self._ui.doubleSpinBox_ptz.setEnabled(True) self._ui.doubleSpinBox_prx.setEnabled(True) self._ui.doubleSpinBox_pry.setEnabled(True) self._ui.doubleSpinBox_prz.setEnabled(True) self._ui.doubleSpinBox_hiplx.setEnabled(True) self._ui.doubleSpinBox_hiprx.setEnabled(True) self._ui.doubleSpinBox_hiply.setEnabled(True) self._ui.doubleSpinBox_hipry.setEnabled(True) self._ui.doubleSpinBox_hiplz.setEnabled(True) self._ui.doubleSpinBox_hiprz.setEnabled(True) self._ui.doubleSpinBox_kneelx.setEnabled(True) self._ui.doubleSpinBox_kneerx.setEnabled(True) self._ui.doubleSpinBox_kneely.setEnabled(True) self._ui.doubleSpinBox_kneery.setEnabled(True) self._ui.doubleSpinBox_kneelz.setEnabled(True) self._ui.doubleSpinBox_kneerz.setEnabled(True) 
self._ui.pushButton_manual_accept.setEnabled(True) self._ui.pushButton_manual_reset.setEnabled(True) self._ui.comboBox_regmode.setEnabled(True) self._ui.spinBox_pcsToFit.setEnabled(True) self._ui.spinBox_mWeight.setEnabled(True) self._ui.checkBox_kneecorr.setEnabled(True) self._ui.checkBox_kneedof.setEnabled(True) self._ui.pushButton_auto_accept.setEnabled(True) self._ui.pushButton_auto_reset.setEnabled(True) self._ui.pushButton_auto_abort.setEnabled(True) self._ui.pushButton_auto_reg.setEnabled(True) def _regUpdate(self, output): # update models in scene self._updateSceneModels() # update error field self._ui.lineEdit_landmarkError.setText('{:5.2f}'.format(self.data.landmarkRMSE)) self._ui.lineEdit_mDist.setText('{:5.2f}'.format(self.data.fitMDist)) # unlock reg ui self._regUnlockUI() # update configs self._lockManualRegUpdate = True self._updateConfigs() self._lockManualRegUpdate = False def _regCallback(self, output): self._updateSceneModels() def _autoReg(self): # self._saveConfigs() # Auto-reg doesn't work if any of the shape values are non-zero self.data.LL.shape_mode_weights = np.zeros(self._ui.spinBox_pcsToFit.value(), dtype=float) self._worker.start() self._regLockUI() def _reset(self): self.data.resetLL() self._lockManualRegUpdate = True self._updateConfigs() self._lockManualRegUpdate = False self._updateSceneModels() # clear error fields self._ui.lineEdit_landmarkError.clear() self._ui.lineEdit_mDist.clear() def _accept(self): self._saveConfigs() self._close() self.done_execution() def _abort(self): self._reset() self._close() def _close(self): for name in self._objects.getObjectNames(): self._objects.getObject(name).remove() self._objects._objects = {} def _refresh(self): for r in range(self._ui.tableWidget.rowCount()): table_item = self._ui.tableWidget.item(r, self.objectTableHeaderColumns['Visible']) if table_item is None: continue name = table_item.text() visible = table_item.checkState() == QtGui.Qt.CheckState.Checked obj = self._objects.getObject(name) if obj.sceneObject: obj.setVisibility(visible) else: obj.draw(self._scene) def _saveScreenShot(self): filename = self._ui.screenshotFilenameLineEdit.text() width = int(self._ui.screenshotPixelXLineEdit.text()) height = int(self._ui.screenshotPixelYLineEdit.text()) self._scene.mlab.savefig(filename, size=(width, height)) # ================================================================# # @on_trait_change('scene.activated') # def testPlot(self): # # This function is called when the view is opened. We don't # # populate the scene when the view is not yet open, as some # # VTK features require a GLContext. # print('trait_changed') # # We can do normal mlab calls on the embedded scene. # self._scene.mlab.test_points3d() # def _saveImage_fired( self ): # self.scene.mlab.savefig( str(self.saveImageFilename), size=( int(self.saveImageWidth), \ # int(self.saveImageLength) ) )
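# --------------------------------------------------------------------------- #
# Illustrative sketch (not part of the original plugin code): the grid placement
# rule used by _pcsToFitChanged above. Principal-component controls are laid out
# two per row in gridLayout_3 -- labels in columns 0 and 2, spin boxes in
# columns 1 and 3. The helper name _pc_grid_cell is hypothetical.
def _pc_grid_cell(pc_number):
    """Return (row, label_column, spinbox_column) for a 1-based PC index."""
    row = (pc_number + 1) // 2            # two PC controls per grid row
    spinbox_column = 1 if pc_number % 2 else 3
    return row, spinbox_column - 1, spinbox_column
# e.g. _pc_grid_cell(1) == (1, 0, 1), _pc_grid_cell(2) == (1, 2, 3),
#      _pc_grid_cell(3) == (2, 0, 1)
# --------------------------------------------------------------------------- #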
# This file is part of Indico. # Copyright (C) 2002 - 2020 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from __future__ import unicode_literals import os from collections import namedtuple from datetime import datetime, time, timedelta from io import BytesIO from operator import attrgetter import pytz from flask import current_app from PIL import Image from sqlalchemy import Date, cast from sqlalchemy.orm import joinedload from indico.core.config import config from indico.core.db import db from indico.core.db.sqlalchemy.links import LinkType from indico.legacy.common.cache import GenericCache from indico.modules.events import Event from indico.modules.events.contributions import Contribution from indico.modules.events.sessions import Session from indico.modules.events.sessions.models.blocks import SessionBlock from indico.modules.events.timetable.models.entries import TimetableEntry from indico.modules.events.timetable.util import find_latest_entry_end_dt from indico.util.caching import memoize_request from indico.util.date_time import now_utc, server_to_utc from indico.util.string import crc32 from indico.util.struct.iterables import group_list ROOM_PHOTO_DIMENSIONS = (290, 170) TempReservationOccurrence = namedtuple('ReservationOccurrenceTmp', ('start_dt', 'end_dt', 'reservation')) TempReservationConcurrentOccurrence = namedtuple('ReservationOccurrenceTmp', ('start_dt', 'end_dt', 'reservations')) _cache = GenericCache('Rooms') @memoize_request def rb_check_user_access(user): """Checks if the user has access to the room booking system""" from indico.modules.rb import rb_settings if rb_is_admin(user): return True if not rb_settings.acls.get('authorized_principals'): # everyone has access return True return rb_settings.acls.contains_user('authorized_principals', user) @memoize_request def rb_is_admin(user): """Checks if the user is a room booking admin""" from indico.modules.rb import rb_settings if user.is_admin: return True return rb_settings.acls.contains_user('admin_principals', user) def build_rooms_spritesheet(): from indico.modules.rb.models.rooms import Room image_width, image_height = ROOM_PHOTO_DIMENSIONS rooms = Room.query.filter(Room.photo).options(joinedload('photo')).all() room_count = len(rooms) sprite_width = (image_width * (room_count + 1)) # +1 for the placeholder sprite_height = image_height sprite = Image.new(mode='RGB', size=(sprite_width, sprite_height), color=(0, 0, 0)) # Placeholder image at position 0 no_photo_path = 'web/static/images/rooms/large_photos/NoPhoto.jpg' no_photo_image = Image.open(os.path.join(current_app.root_path, no_photo_path)) image = no_photo_image.resize(ROOM_PHOTO_DIMENSIONS, Image.ANTIALIAS) sprite.paste(image, (0, 0)) mapping = {} for count, room in enumerate(rooms, start=1): location = image_width * count image = Image.open(BytesIO(room.photo.data)).resize(ROOM_PHOTO_DIMENSIONS, Image.ANTIALIAS) sprite.paste(image, (location, 0)) mapping[room.id] = count output = BytesIO() sprite.save(output, 'JPEG') value = output.getvalue() token = crc32(value) _cache.set('rooms-sprite', value) _cache.set('rooms-sprite-mapping', mapping) _cache.set('rooms-sprite-token', token) return token def get_resized_room_photo(room): photo = Image.open(BytesIO(room.photo.data)).resize(ROOM_PHOTO_DIMENSIONS, Image.ANTIALIAS) output = BytesIO() photo.save(output, 'JPEG') return output.getvalue() def remove_room_spritesheet_photo(room): mapping = 
_cache.get('rooms-sprite-mapping') if not mapping or room.id not in mapping: return del mapping[room.id] _cache.set('rooms-sprite-mapping', mapping) def group_by_occurrence_date(occurrences, sort_by=None): return group_list(occurrences, key=lambda obj: obj.start_dt.date(), sort_by=sort_by) def serialize_occurrences(data): from indico.modules.rb.schemas import (reservation_occurrences_schema) return {dt.isoformat(): reservation_occurrences_schema.dump(data) for dt, data in data.iteritems()} def serialize_blockings(data): from indico.modules.rb.schemas import (simple_blockings_schema) return {dt.isoformat(): simple_blockings_schema.dump(data) for dt, data in data.iteritems()} def serialize_nonbookable_periods(data): from indico.modules.rb.schemas import (nonbookable_periods_schema) return {dt.isoformat(): nonbookable_periods_schema.dump(data) for dt, data in data.iteritems()} def serialize_unbookable_hours(data): from indico.modules.rb.schemas import (bookable_hours_schema) return [bookable_hours_schema.dump(d) for d in data] def serialize_concurrent_pre_bookings(data): from indico.modules.rb.schemas import (concurrent_pre_bookings_schema) return {dt.isoformat(): concurrent_pre_bookings_schema.dump(data) for dt, data in data.iteritems()} def get_linked_object(type_, id_): if type_ == LinkType.event: return Event.get(id_, is_deleted=False) elif type_ == LinkType.contribution: return (Contribution.query .filter(Contribution.id == id_, ~Contribution.is_deleted, Contribution.event.has(is_deleted=False)) .first()) elif type_ == LinkType.session_block: return (SessionBlock.query .filter(SessionBlock.id == id_, SessionBlock.session.has(db.and_(~Session.is_deleted, Session.event.has(is_deleted=False)))) .first()) def is_booking_start_within_grace_period(start_dt, user, allow_admin=False): from indico.modules.rb import rb_settings if allow_admin and rb_is_admin(user): return True default_tz = pytz.timezone(config.DEFAULT_TIMEZONE) start_dt_localized = default_tz.localize(start_dt) grace_period = rb_settings.get('grace_period') if grace_period is None: today = server_to_utc(datetime.now()).astimezone(default_tz).date() return start_dt_localized.date() >= today start_dt_utc = start_dt_localized.astimezone(pytz.utc) grace_period = timedelta(hours=grace_period) return start_dt_utc >= now_utc() - grace_period def serialize_booking_details(booking): from indico.modules.rb.operations.blockings import filter_blocked_rooms, get_rooms_blockings, group_blocked_rooms from indico.modules.rb.operations.bookings import (get_booking_occurrences, get_existing_room_occurrences, group_blockings, group_nonbookable_periods) from indico.modules.rb.operations.misc import get_rooms_nonbookable_periods, get_rooms_unbookable_hours from indico.modules.rb.schemas import reservation_details_schema, reservation_occurrences_schema_with_permissions attributes = reservation_details_schema.dump(booking) date_range, occurrences = get_booking_occurrences(booking) booking_details = dict(attributes) occurrences_by_type = dict(bookings={}, cancellations={}, rejections={}, other={}, blockings={}, unbookable_hours={}, nonbookable_periods={}, overridable_blockings={}) booking_details['occurrences'] = occurrences_by_type booking_details['date_range'] = [dt.isoformat() for dt in date_range] for dt, [occ] in occurrences.iteritems(): serialized_occ = reservation_occurrences_schema_with_permissions.dump([occ]) if occ.is_cancelled: occurrences_by_type['cancellations'][dt.isoformat()] = serialized_occ elif occ.is_rejected: 
occurrences_by_type['rejections'][dt.isoformat()] = serialized_occ occurrences_by_type['bookings'][dt.isoformat()] = serialized_occ if occ.is_valid else [] start_dt = datetime.combine(booking.start_dt, time.min) end_dt = datetime.combine(booking.end_dt, time.max) unbookable_hours = get_rooms_unbookable_hours([booking.room]).get(booking.room.id, []) other_bookings = get_existing_room_occurrences(booking.room, start_dt, end_dt, booking.repeat_frequency, booking.repeat_interval, skip_booking_id=booking.id) blocked_rooms = get_rooms_blockings([booking.room], start_dt.date(), end_dt.date()) overridable_blockings = group_blocked_rooms(filter_blocked_rooms(blocked_rooms, overridable_only=True, explicit=True)).get(booking.room.id, []) nonoverridable_blockings = group_blocked_rooms(filter_blocked_rooms(blocked_rooms, nonoverridable_only=True, explicit=True)).get(booking.room.id, []) nonbookable_periods = get_rooms_nonbookable_periods([booking.room], start_dt, end_dt).get(booking.room.id, []) nonbookable_periods_grouped = group_nonbookable_periods(nonbookable_periods, date_range) occurrences_by_type['other'] = serialize_occurrences(group_by_occurrence_date(other_bookings)) occurrences_by_type['blockings'] = serialize_blockings(group_blockings(nonoverridable_blockings, date_range)) occurrences_by_type['overridable_blockings'] = serialize_blockings(group_blockings(overridable_blockings, date_range)) occurrences_by_type['unbookable_hours'] = serialize_unbookable_hours(unbookable_hours) occurrences_by_type['nonbookable_periods'] = serialize_nonbookable_periods(nonbookable_periods_grouped) return booking_details def serialize_availability(availability): for data in availability.viewvalues(): data['blockings'] = serialize_blockings(data.get('blockings', {})) data['overridable_blockings'] = serialize_blockings(data.get('overridable_blockings', {})) data['nonbookable_periods'] = serialize_nonbookable_periods(data.get('nonbookable_periods', {})) data['unbookable_hours'] = serialize_unbookable_hours(data.get('unbookable_hours', {})) data['concurrent_pre_bookings'] = serialize_concurrent_pre_bookings(data.get('concurrent_pre_bookings', {})) data.update({k: serialize_occurrences(data[k]) if k in data else {} for k in ('candidates', 'conflicting_candidates', 'pre_bookings', 'bookings', 'conflicts', 'pre_conflicts', 'rejections', 'cancellations')}) return availability def generate_spreadsheet_from_occurrences(occurrences): """Generate spreadsheet data from a given booking occurrence list. 
:param occurrences: The booking occurrences to include in the spreadsheet """ headers = ['Room', 'Booking ID', 'Booked for', 'Reason', 'Occurrence start', 'Occurrence end'] rows = [{'Room': occ.reservation.room.full_name, 'Booking ID': occ.reservation.id, 'Booked for': occ.reservation.booked_for_name, 'Reason': occ.reservation.booking_reason, 'Occurrence start': occ.start_dt, 'Occurrence end': occ.end_dt} for occ in occurrences] return headers, rows def _find_first_entry_start_dt(event, day): """Find the first timetable entry on a given day.""" if not (event.start_dt_local.date() <= day <= event.end_dt_local.date()): raise ValueError("Day out of event bounds.") entries = event.timetable_entries.filter(TimetableEntry.parent_id.is_(None), cast(TimetableEntry.start_dt.astimezone(event.tzinfo), Date) == day).all() return min(entries, key=attrgetter('start_dt')).start_dt.astimezone(event.tzinfo) if entries else None def _find_latest_entry_end_dt(event, day): dt = find_latest_entry_end_dt(event, day) if dt: return dt.astimezone(event.tzinfo) def get_booking_params_for_event(event): """ Get a set of RB interface parameters suitable for this event. These parameters can then be used to construct a URL that will lead to a pre-filled search that matches the start/end times for a given day. :param event: `Event` object """ is_single_day = event.start_dt_local.date() == event.end_dt_local.date() params = { 'link_type': 'event', 'link_id': event.id, 'text': '#{}'.format(event.room.id) if event.room else None, } all_times = {day: (_find_first_entry_start_dt(event, day), _find_latest_entry_end_dt(event, day)) for day in event.iter_days(tzinfo=event.tzinfo)} # if the timetable is empty on a given day, use (start_dt, end_dt) of the event all_times = [((day, (event.start_dt_local, event.end_dt_local)) if times[0] is None else (day, times)) for day, times in all_times.viewitems()] same_times = len(set(times for (_, times) in all_times)) == 1 if is_single_day or same_times: params['sd'] = event.start_dt_local.date().isoformat() if event.start_dt_local.time() < event.end_dt_local.time(): # if we have suitable times we provide enough data to immediately run a search. # XXX: if filtersAreSet also checked for times we could provide dates/recurrence # as well even when we don't know suitable times.. 
but that would require extra # code to handle the case of a custom RB interface where no times are used at all params.update({ 'ed': None if is_single_day else event.end_dt_local.date().isoformat(), 'recurrence': 'single' if is_single_day else 'daily', 'st': event.start_dt_local.strftime('%H:%M'), 'et': event.end_dt_local.strftime('%H:%M'), 'number': 1, 'interval': 'week', }) return { 'type': 'same_times', 'params': params } else: time_info = sorted([ (day, { # if we have a proper start/end time, we provide all args to search 'number': 1, 'interval': 'week', 'recurrence': 'single', 'sd': day.isoformat(), 'st': start.strftime('%H:%M'), 'et': end.strftime('%H:%M') } if start.time() < end.time() else { # if not (empty days or event end time < event start time), we just # populate the day and let the user specify the times manually 'sd': day.isoformat(), }) for day, (start, end) in all_times ]) return { 'type': 'mixed_times', 'params': params, 'time_info': time_info } def get_prebooking_collisions(reservation): from indico.modules.rb.models.reservation_occurrences import ReservationOccurrence valid_occurrences = reservation.occurrences.filter(ReservationOccurrence.is_valid).all() return ReservationOccurrence.find_overlapping_with(reservation.room, valid_occurrences, reservation.id).all()
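# --------------------------------------------------------------------------- #
# Illustrative sketch (not part of Indico): how a consumer of the spritesheet
# produced by build_rooms_spritesheet can locate a single room's photo. Photos
# are pasted horizontally at x = image_width * index, with index 0 reserved for
# the "NoPhoto" placeholder, and the cached 'rooms-sprite-mapping' maps
# room.id -> index. The helper name _sprite_crop_box is hypothetical.
def _sprite_crop_box(sprite_index, dimensions=ROOM_PHOTO_DIMENSIONS):
    """Return the (left, upper, right, lower) crop box for a sprite index."""
    width, height = dimensions
    left = width * sprite_index
    return (left, 0, left + width, height)
# e.g. Image.open(BytesIO(_cache.get('rooms-sprite'))).crop(_sprite_crop_box(1))
# --------------------------------------------------------------------------- #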