right handle', rgt)
#self.build_pickwalking( rootNode )
# TODO: Evaluate if this should rather be in Rig or Transform
def switch_fkik(self, **kwargs):
am = AniMeta()
char = None
limb = None
side = None
newMode = 0
if 'Character' in kwargs:
char = kwargs['Character']
if 'Limb' in kwargs:
limb = kwargs['Limb']
if 'Side' in kwargs:
side = kwargs['Side']
# TODO: Make sure rig is in control mode
if char and limb and side:
ctrl = 'Foot_IK_Lft_Ctrl'
if limb == 'Leg' and side == 'Lft':
ctrl = 'Foot_IK_Lft_Ctrl'
if limb == 'Leg' and side == 'Rgt':
ctrl = 'Foot_IK_Rgt_Ctrl'
if limb == 'Arm' and side == 'Rgt':
ctrl = 'Hand_IK_Rgt_Ctrl'
if limb == 'Arm' and side == 'Lft':
ctrl = 'Hand_IK_Lft_Ctrl'
ik_node = self.find_node( char, ctrl )
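# Read the current FK_IK state of the limb (0 = FK, 1 = IK) and flip it to get the target mode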
newMode = 1 - int(mc.getAttr(ik_node + '.FK_IK'))
############################################################################################################
# Leg
if limb == 'Leg':
# From IK to FK
if newMode == 0:
nodes = ['LegUp_FK_{}_Ctrl', 'LegLo_FK_{}_Ctrl', 'Foot_FK_{}_Ctrl', 'Toes_FK_{}_Ctrl']
joints = ['LegUp_IK_{}_Jnt', 'LegLo_IK_{}_Jnt', 'Foot_IK_{}_Jnt', 'Toes_IK_{}_Jnt']
for i in range(len(nodes)):
ctrlName = nodes[i].format(side)
jntName = joints[i].format(side)
ctrl = am.find_node(char, ctrlName)
jnt = am.find_node(char, jntName)
if ctrl is None:
mc.warning('aniMeta: can not find ' + ctrlName)
break
if jnt is None:
mc.warning('aniMeta: can not find ' + jntName)
break
m = self.get_matrix( jnt, kWorld )
self.set_matrix(ctrl, m, kWorld, setScale=False )
mc.setAttr(ik_node + '.FK_IK', 0)
# From FK to IK
elif newMode == 1:
# Heel
heel_ctrl = 'Heel_IK_{}_Ctrl'.format(side)
toesTip_ctrl = 'ToesTip_IK_{}_Ctrl'.format(side)
footLift_ctrl = 'FootLift_IK_{}_Ctrl'.format(side)
for node in [heel_ctrl, toesTip_ctrl, footLift_ctrl]:
node = am.find_node(char, node)
if node:
self.reset_handle( node )
# Foot
legUp_ik_jnt = 'LegUp_IK_{}_Jnt'.format(side)
legLo_ik_jnt = 'LegLo_IK_{}_Jnt'.format(side)
pole_ik = 'LegPole_IK_{}_Ctrl'.format(side)
foot_ik = 'Foot_IK_{}_Ctrl'.format(side)
foot_jnt = 'Foot_{}_Jnt'.format(side)
legUp_ik_jnt = am.find_node(char, legUp_ik_jnt)
legLo_ik_jnt = am.find_node(char, legLo_ik_jnt)
pole_ik = am.find_node(char, pole_ik)
foot_ik = am.find_node(char, foot_ik)
foot_jnt = am.find_node(char, foot_jnt)
m = self.get_matrix( foot_jnt, kWorld )
if side == 'Rgt':
m = om.MEulerRotation( math.radians(180),0,0 ).asMatrix() * m
self.set_matrix(foot_ik, m, kWorld, setScale = False )
pa = mc.getAttr( legLo_ik_jnt + '.preferredAngle' )[0]
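# Derive a pole vector position from the IK chain (hip, knee, foot) and the knee's preferred angle, then place the pole control there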
out = self.get_polevector_position( legUp_ik_jnt, legLo_ik_jnt, foot_jnt, pa )
self.set_matrix( pole_ik, out, kWorld)
mc.setAttr(ik_node + '.FK_IK', 1)
# Leg
############################################################################################################
############################################################################################################
# Arm
if limb == 'Arm':
# From IK to FK
if newMode == 0:
nodes = ['ArmUp_FK_{}_Ctrl', 'ArmLo_FK_{}_Ctrl', 'Hand_FK_{}_Ctrl' ]
joints = ['ArmUp_{}_Jnt', 'ArmLo_{}_Jnt', 'Hand_{}_Jnt' ]
for i in range(len(nodes)):
ctrlName = nodes[i].format(side)
jntName = joints[i].format(side)
ctrl = am.find_node(char, ctrlName)
jnt = am.find_node(char, jntName)
if ctrl is None:
mc.warning('aniMeta: can not find ' + ctrlName)
break
if jnt is None:
mc.warning('aniMeta: can not find ' + jntName)
break
m = self.get_matrix( jnt )
self.set_matrix( ctrl, m )
mc.setAttr(ik_node + '.FK_IK', 0)
# From FK to IK
elif newMode == 1:
# Foot
armUp_ik_jnt = 'ArmUp_IK_{}_Jnt'.format(side)
armLo_ik_jnt = 'ArmLo_IK_{}_Jnt'.format(side)
pole_ik = 'ArmPole_IK_{}_Ctrl'.format(side)
hand_ik = 'Hand_IK_{}_Ctrl'.format(side)
hand_jnt = 'Hand_{}_Jnt'.format(side)
armUp_ik_jnt = am.find_node(char, armUp_ik_jnt)
armLo_ik_jnt = am.find_node(char, armLo_ik_jnt)
pole_ik = am.find_node(char, pole_ik)
hand_ik = am.find_node(char, hand_ik)
hand_jnt = am.find_node(char, hand_jnt)
m = self.get_matrix(hand_jnt)
if side == 'Rgt':
m = om.MEulerRotation(math.radians(180),0,0).asMatrix() * m
self.set_matrix(hand_ik, m, setScale = False )
pa = mc.getAttr( armLo_ik_jnt + '.preferredAngle' )[0]
out = Transform().get_polevector_position( armUp_ik_jnt, armLo_ik_jnt, hand_jnt, pa )
Transform().set_matrix( pole_ik, out, kWorld)
mc.setAttr(ik_node + '.FK_IK', 1)
# Arm
############################################################################################################
else:
mc.warning('aniMeta: can not switch rig {} {} {}'.format(char, limb, side))
return newMode
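# Minimal usage sketch (illustrative; the character name and the class holding this
# method are assumptions, since the enclosing class is not shown in this excerpt):
#
#   rig = Rig()
#   new_mode = rig.switch_fkik(Character='Adam', Limb='Arm', Side='Lft')
#   # new_mode is 1 when the limb was switched to IK, 0 when switched back to FK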
def build_pickwalking(self, char ):
# Deactivated for now
return True
def parent_controller( node1, node2 ):
node1 = self.find_node(char, node1)
node2 = self.find_node(char, node2)
if node1 is not None and node2 is not None:
mc.controller( node1, node2, parent=True)
mc.controller( self.find_node(char, 'Main_Ctr_Ctrl'))
parent_controller( 'Torso_Ctr_Ctrl', 'Main_Ctr_Ctrl' )
parent_controller( 'Hips_Ctr_Ctrl', 'Torso_Ctr_Ctrl' )
parent_controller( 'Spine1_Ctr_Ctrl', 'Hips_Ctr_Ctrl' )
parent_controller( 'Spine2_Ctr_Ctrl', 'Spine1_Ctr_Ctrl' )
parent_controller( 'Spine3_Ctr_Ctrl', 'Spine2_Ctr_Ctrl' )
parent_controller( 'Chest_Ctr_Ctrl', 'Spine3_Ctr_Ctrl' )
parent_controller( 'Neck_Ctr_Ctrl', 'Chest_Ctr_Ctrl' )
parent_controller( 'Head_Ctr_Ctrl', 'Neck_Ctr_Ctrl' )
parent_controller( 'Clavicle_Lft_Ctrl', 'Chest_Ctr_Ctrl' )
parent_controller( 'ArmUp_FK_Lft_Ctrl', 'Clavicle_Lft_Ctrl' )
parent_controller( 'ArmLo_FK_Lft_Ctrl', 'ArmUp_FK_Lft_Ctrl' )
parent_controller( 'Hand_FK_Lft_Ctrl', 'ArmLo_FK_Lft_Ctrl' )
for side in ['Lft', 'Rgt']:
for finger in ['Thumb', 'Index', 'Middle', 'Ring', 'Pinky']:
for i in range(4):
parent = finger + str(i)
if i == 0:
parent = 'Hand_FK'
if finger == 'Thumb' and i == 3:
break
parent_controller(finger + str(i + 1) + '_' + side + '_Ctrl', parent + '_' + side + '_Ctrl' )
parent_controller( 'Foot_IK_' + side + '_Ctrl', 'Hips_Ctr_Ctrl' )
parent_controller( 'FootLift_IK_' + side + '_Ctrl', 'Foot_IK_' + side + '_Ctrl' )
parent_controller( 'Toes_IK_' + side + '_Ctrl', 'FootLift_IK_' + side + '_Ctrl' )
parent_controller( 'ToesTip_IK_' + side + '_Ctrl', 'Toes_IK_' + side + '_Ctrl' )
parent_controller( 'Heel_IK_' + side + '_Ctrl', 'Foot_IK_' + side + '_Ctrl' )
parent_controller( 'LegPole_IK_' + side + '_Ctrl', 'Foot_IK_' + side + '_Ctrl' )
# Biped
#
######################################################################################
######################################################################################
#
# Anim
class Anim(Transform):
def __init__(self):
super( Anim, self ).__init__()
def get_anim_curve_data( self, node ):
dict = { }
animObj = self.get_mobject( node )
if animObj is not None:
animFn = oma.MFnAnimCurve( animObj )
dict[ 'type' ] = curveType[ animFn.animCurveType ]
try:
dict[ 'input' ] = mc.listConnections( node + '.input', s=True, d=False, p=True )[0]
except:
pass
try:
dict[ 'output' ] = mc.listConnections( node + '.output', s=False, d=True, p=True )[0]
except:
pass
# Pre Infinity Type
if animFn.preInfinityType != oma.MFnAnimCurve.kConstant:
dict[ 'pre' ] = animFn.preInfinityType
# Post Infinity Type
if animFn.postInfinityType != oma.MFnAnimCurve.kConstant:
dict[ 'post' ] = animFn.postInfinityType
if animFn.isWeighted:
dict[ 'weighted' ] = animFn.isWeighted
dict['keys'] = {}
times = []
values = []
# Lists redundant?
itt = [] # In tangent type
ott = [] # Out tangent type
itaw = [] # In tangent angle Weight
otaw = [] # Out tangent angle weight
itxy = [] # In tangent XY
otxy = [] # Out tangent XY
alt = []
for i in range( 0, animFn.numKeys ):
if dict['type'] == 'animCurveUA' or dict['type'] == 'animCurveUL':
time_val = animFn.input( i )
else:
time_val = round( animFn.input( i ).value, 5 )
time_tmp = time_val
times.append ( time_tmp )
value_tmp = animFn.value( i )
if dict['type'] == 'animCurveTA':
value_tmp = math.degrees(value_tmp)
values.append( round(value_tmp,5) )
tmp_dict = {}
# In Tangent type
itt.append( animFn.inTangentType( i ) )
ott.append( animFn.outTangentType( i ) )
# In Tangent Angle Weight
itaw_tmp = animFn.getTangentAngleWeight(i,True)
itaw.append( [itaw_tmp[0].asDegrees(), itaw_tmp[1]] )
# Out Tangent Angle Weight
otaw_tmp = animFn.getTangentAngleWeight(i,False)
otaw.append( [otaw_tmp[0].asDegrees(), otaw_tmp[1]] )
# In Tangent
itxy.append( animFn.getTangentXY(i,True))
# Out Tangent
otxy.append( animFn.getTangentXY(i,False))
tmp_dict[ 'bd' ] = animFn.isBreakdown(i)
tmp_dict[ 'wl' ] = animFn.weightsLocked(i)
tmp_dict[ 'tl' ] = animFn.tangentsLocked(i)
if itt[i] != oma.MFnAnimCurve.kTangentAuto:
tmp_dict['itt'] = itt[i]
if ott[i] != oma.MFnAnimCurve.kTangentAuto:
tmp_dict['ott'] = ott[i]
if itaw[i][0] != 0.0:
tmp_dict['ia'] = round( itaw[i][0], 5 )
if itaw[i][1] != 1.0:
tmp_dict['iw'] = round( itaw[i][1], 5 )
if otaw[i][0] != 0.0:
tmp_dict['oa'] = round( otaw[i][0], 5 )
if otaw[i][1] != 1.0:
tmp_dict['ow'] = round( otaw[i][1], 5 )
if itxy[i][0] != 1.0:
tmp_dict['ix'] = round( itxy[i][0], 5 )
if itxy[i][1] != 0.0:
tmp_dict['iy'] = round( itxy[i][1], 5 )
if otxy[i][0] != 1.0:
tmp_dict['ox'] = round( otxy[i][0], 5 )
if otxy[i][1] != 0.0:
tmp_dict['oy'] = round( otxy[i][1], 5 )
if len ( tmp_dict ) > 0:
tmp_dict[ 'time' ] = times[ i ]
alt.append( tmp_dict )
if len ( alt ) > 0:
dict[ 'keys' ]['tangent'] = alt
dict[ 'keys' ]['time'] = times
dict[ 'keys' ]['value'] = values
return dict
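# Illustrative sketch (hypothetical node name; assumes a Maya scene with an animated
# channel whose animCurve node is called 'pCube1_translateX'):
#
#   anim = Anim()
#   data = anim.get_anim_curve_data('pCube1_translateX')
#   # data['type'] is e.g. 'animCurveTL'; data['keys']['time'] / ['value'] hold the key frames and values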
def set_anim_curve_data( self, animCurve, data ):
curveObj = self.get_mobject( animCurve )
if curveObj:
try:
animFn = oma.MFnAnimCurve( curveObj )
# obj = animFn.create( 4 ) # animCurveUA
except:
mc.warning('aniMeta: can not set MFnAnimCurve on ' + animCurve)
return False
animFn.setIsWeighted( data[ 'weighted' ] )
keys = data[ 'keys' ]
if 'time' in keys and 'value' in keys:
times = keys[ 'time' ]
values = keys[ 'value' ]
if len( times ) == len( values ):
for i in range( len( times ) ):
animFn.addKey( times[ i ], values[ i ] )
if 'tangent' in keys:
tangents = keys[ 'tangent' ]
for i in range( len( tangents ) ):
tangent = tangents[ i ]
if 'time' in tangent:
# Unlock it before setting tangents
animFn.setTangentsLocked( i, False )
animFn.setWeightsLocked( i, False )
returning from shgrid with ier = ', ier
if ier != 0:
msg = 'Error in return from shgetnp call with -- ' + Shgrid.errorTable(self)[ier]
raise ValueError, msg
iflag = 1
for i in range(1, numberPoints):
np[i], ier = shgridmodule.shgetnp(self.xo, self.yo, self.zo,
self.xi, self.yi, self.zi,
iflag,
iwk, rwk)
if debug == 1:
print '***************** returning from shgrid with ier = ', ier
if ier != 0:
msg = 'Error in return from shgetnp call with -- ' + Shgrid.errorTable(self)[ier]
raise ValueError, msg
return np
#---------------------------------------------------------------------------------
# **************** Control parameter manipulation functions ********************
#---------------------------------------------------------------------------------
def parameterNames(self):
#------------------------------------------------------------------------------
#
# purpose: produce a list of the dsgrid parameters
#
# usage: parameters = parameterNames(self)
#
# passed: self
#
# returned: parameters
#
#------------------------------------------------------------------------------
parameters = ['name', '----', 'ncl', 'nfl', 'nls']
return parameters
def makeDefaultParameterTable(self):
#-----------------------------------------------------------------------------------
#
# purpose: construct the dictionary which is the default control parameters table
#
# usage: makeDefaultParameterTable(self)
#
# passed: self
#
# returned: parameterDict
#
#----------------------------------------------------------------------------------
parameterDict = {
'name':('type ',' legal values ',' default ', ' description '),
'----':('-----','------------------- ---','------------------------ ', '------------------------------------------------------'),
'ncl' :(' int ','>ncl > 0 ','int(math.pow(n/3.,1./3.))', 'granularity of 3-D cell grid '),
'nfl' :(' int ','1<= nfl <= min(32,n-1)','min(32, n-1) ', 'number of input data values within radius of influence'),
'nls' :(' int ','9<= nls <= min(17,n-1)','min(17, n-1) ', 'number of data values used in the least square fit ')}
return parameterDict
def makeInstanceParameterTable(self):
#----------------------------------------------------------------------------------
#
# purpose: construct the dictionary which is the instance parameters table
#
# usage: makeInstanceParameterTable(self)
#
# passed: self
#
# returned: parameterDict
#
#----------------------------------------------------------------------------------
parameterDict = {
'name': ('type ',' legal values ',' value ', ' description '),
'----': ('-----','----------------------','------------- ', '------------------------------------------------------'),
'ncl' : (' int ','>ncl > 0 ',self.ncl, 'granularity of 3-D cell grid '),
'nfl' : (' int ','1<= nfl <= min(32,n-1)',self.nfl, 'number of input data values within radius of influence'),
'nls' : (' int ','9<= nls <= min(17,n-1)',self.nls, 'number of data values used in the least square fit ')}
return parameterDict
def printDefaultParameterTable(self):
""" --------------------------------------------------------------------------------------------------------
purpose: print the value of all the parameters
usage: r.printDefaultParameterTable()
where r is an instance of Dsgrid
passed: self
returned: None
--------------------------------------------------------------------------------------------------------"""
names = Shgrid.parameterNames(self)
parameterDict = Shgrid.makeDefaultParameterTable(self)
for item in names:
items = (item, parameterDict[item][0], parameterDict[item][1], parameterDict[item][2], parameterDict[item][3])
print '%-6.6s %-6.6s %-22.22s %-24.24s %s' % items
return
def printInstanceParameterTable(self):
""" --------------------------------------------------------------------------------------------------------
purpose: print the value of all the parameters
usage: r.printInstanceParameterTable()
where r is an instance of Dsgrid
passed: self
returned: None
--------------------------------------------------------------------------------------------------------"""
names = Shgrid.parameterNames(self)
parameterDict = Shgrid.makeInstanceParameterTable(self)
for item in names:
items = (item, parameterDict[item][0], parameterDict[item][1], parameterDict[item][2], parameterDict[item][3])
print '%-6.6s %-6.6s %-22.22s %-14.14s %s' % items
return
def printInstanceParameters(self):
""" --------------------------------------------------------------------------------------------------------
purpose: print the values of the current dsgrid control parameters in c code
usage: r. printInstanceParameters()
where r is an instance of Dsgrid
passed: self
returned: None
--------------------------------------------------------------------------------------------------------"""
names = Shgrid.parameterNames(self)
names = names[2:]
for name in names:
print 'Currently, %s = %d' % (name, getattr(self, name))
return None
def setInstanceParameters(self):
#---------------------------------------------------------------------------
#
# purpose: set the instance values of the current shgrid control parameters in c code
#
# usage: r.setInstanceParameters()
#
# where r is an instance of Shgrid
#
# passed: self
#
# returned: None
#
#----------------------------------------------------------------------------
names = Shgrid.parameterNames(self)
names = names[2:]
for name in names:
shgridmodule.shseti(name, getattr(self, name))
return None
#---------------------------------------------------------------------------------
# ***************************** Error Table ************************************
#---------------------------------------------------------------------------------
def errorTable(self):
""" --------------------------------------------------------------------------------------------------------
purpose: construct the dictionary which provides access to error messages
usage: errorDict = r.errorTable()
where r is an instance of Dsgrid
returned: errorDict
--------------------------------------------------------------------------------------------------------"""
errorDict = {
0: 'No error',
1: 'Number of input data points must be > 9.',
2: 'Number of points used to calculate weights must be > 9.',
3: 'Number of data points used in the least squares fit must be > 9.',
4: 'Number of points used to calculate weights must be at least 1.',
5: 'Number of data points used in the least squares fit too large. Should be greater than n but less than 41.',
6: 'Number of points used to calculate weights too large. Should be greater than n but less than 41.',
7: 'Cell grid dimensions must be positive.',
8: 'Duplicate input points encountered.',
9: 'Collinear input, no unique solution.',
10: 'At least two points must have different x coordinates.',
11: 'At least two points must have different y coordinates.',
12: 'At least two points must have different z coordinates.',
13: 'No cells contain a point within the radius of influence of the input point.',
14: 'Negative search radius in calculating least squares fit.' }
return errorDict
#---------------------------------------------------------------------------------
# *************************** magic functions *********************************
#---------------------------------------------------------------------------------
def __setattr__(self, name, value):
#---------------------------------------------------------------------------------
#
# purpose: '__setattr__' is called on every assignment to an instance attribute.
# Consequently, it must put the value in through the __dict__ to avoid
# calling itself and setting up an infinite recursion loop. It sets the
# attribute called name to value in two steps.
# One -- set the global C code control parameter
# Two -- set the instance self data control parameter
#
# usage: x.name = value
#
# passed : name and value
#
# returned: None
#
#---------------------------------------------------------------------------------
names = Shgrid.parameterNames(self)
names = names[2:]
if name in names:
shgridmodule.shseti(name, value)
self.__dict__[name] = value
else:
self.__dict__[name] = value
return None
def __getattr__(self, name):
#---------------------------------------------------------------------------------
#
# purpose: '__getattr__' is called only if a referenced attribute can not be found
# in the instance. It gets the attribute from shgridmodule if possible.
#
# usage: x.name -- name is the object and not a string repr
#
# passed : name
#
# returned: x.name
#
#---------------------------------------------------------------------------------
names = Shgrid.parameterNames(self)
names = names[2:]
if name in names:
value = shgridmodule.shgeti(name)
else:
value = self.__dict__[name]
return value
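# Illustrative sketch of the pass-through (hypothetical instance 'r' of Shgrid):
#
#   r.nfl = 16       # __setattr__ also forwards the value to the C code via shgridmodule.shseti('nfl', 16)
#   print r.nls      # if 'nls' was never set on the instance, __getattr__ fetches it via shgridmodule.shgeti('nls')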
#---------------------------------------------------------------------------------
# *******************************************************************
# **************** end of magic functions **************************
# *******************************************************************
#---------------------------------------------------------------------------------
def reverseData(self, data):
#------------------------------------------------------------------------------
#
# purpose: reverse the order of the data if outgrid submitted was not increasing
#
# usage: data = r.reverseData
#
# returned: parameters
#
#------------------------------------------------------------------------------
if self.zreverse == 'yes':
data = data[::-1,:,:]
if self.yreverse == 'yes':
data = data[:,::-1,:]
if self.xreverse == 'yes':
data = data[:,:,::-1]
return data
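# Standalone numpy sketch of the same reversal (illustrative only):
#
#   import numpy
#   data = numpy.arange(24).reshape(2, 3, 4)
#   data = data[::-1, :, :]     # flip along the z axis, as done when zreverse == 'yes'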
def checkdim(x, y, z):
#------------------------------------------------------------------------------------------
#
# purpose: determine whether the coordinate grid is random or monotonically increasing
#
# usage:
#
# returned: x, y, z, monotonic, xreverse, yreverse, zreverse
#
#-------------------------------------------------------------------------------------------
# x coordinate examination
xsize = len(x)
if x[0] > x[xsize - 1]:
x = x[::-1]
xreverse = 'yes'
else:
xreverse = 'no'
xmonotonic = 'yes' # monotonic and possibly reversed to make it monotonically increasing
for n in range(1, xsize):
if x[n] < x[n - 1]:
xmonotonic = 'no' # not monotonic so return the original grid
# y coordinate examination
ysize = len(y)
if y[0] > y[ysize - 1]:
y = y[::-1]
yreverse = 'yes'
else:
yreverse = 'no'
ymonotonic = 'yes' # monotonic and possibly reversed to make it monotonically increasing
for n in range(1, ysize):
if y[n] < y[n - 1]:
ymonotonic = 'no' # not monotonic so return the original grid
# z coordinate examination
zsize = len(z)
if z[0] > z[zsize - 1]:
z = z[::-1]
zreverse = 'yes'
else:
zreverse = 'no'
zmonotonic = 'yes' # monotonic and possibly reversed to make it monotonically increasing
for n in range(1, zsize):
if z[n] < z[n - 1]:
zmonotonic = 'no' # not monotonic so return the original grid
if xmonotonic == 'yes' and ymonotonic ==
import contextlib
import gc
import re
import sys
import threading
import types
from contextlib import ExitStack, contextmanager
from functools import partial
from typing import List, Callable, Any, cast
import attr
import pytest
from .. import FrameInfo, Traceback, customize, register_get_target
def remove_address_details(line):
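# e.g. "<Foo object at 0x7f3a2c40>" -> "<Foo object at (address)>"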
return re.sub(r"\b0x[0-9A-Fa-f]+\b", "(address)", line)
def clean_tb_line(line):
return remove_address_details(line).partition(" #")[0]
def assert_tb_matches(tb, expected, error=None):
# smoke test:
str(tb)
tb.as_stdlib_summary()
tb.as_stdlib_summary(capture_locals=True)
for frame in tb.frames:
str(frame)
try:
if error is None and tb.error is not None: # pragma: no cover
raise tb.error
assert type(tb.error) is type(error)
assert remove_address_details(str(tb.error)) == remove_address_details(
str(error)
)
assert len(tb) == len(expected)
for (
entry,
(expect_fn, expect_line, expect_ctx_name, expect_ctx_typename),
) in zip(tb, expected):
assert entry.funcname == expect_fn
assert clean_tb_line(entry.linetext) == expect_line
assert entry.context_name == expect_ctx_name
if entry.context_manager is None:
assert expect_ctx_typename is None
else:
assert type(entry.context_manager).__name__ == expect_ctx_typename
except Exception: # pragma: no cover
print_assert_matches("tb")
raise
def print_assert_matches(get_tb): # pragma: no cover
parent = sys._getframe(1)
get_tb_code = compile(get_tb, "<eval>", "eval")
tb = eval(get_tb_code, parent.f_globals, parent.f_locals)
print("---")
print(str(tb).rstrip())
print("---")
print(" assert_tb_matches(")
print(" " + get_tb + ",")
print(" [")
for entry in tb:
if entry.frame.f_code is get_tb_code:
funcname = parent.f_code.co_name
linetext = get_tb + ","
else:
funcname = entry.funcname
linetext = clean_tb_line(entry.linetext)
typename = type(entry.context_manager).__name__
if typename == "NoneType":
typename = None
record = (funcname, linetext, entry.context_name, typename)
print(" " + repr(record) + ",")
print(" ],")
if tb.error:
print(f" error={remove_address_details(repr(tb.error))},")
print(" )")
def no_abort(_): # pragma: no cover
import trio
return trio.lowlevel.Abort.FAILED
@contextmanager
def null_context():
yield
@contextmanager
def outer_context():
with inner_context() as inner: # noqa: F841
yield
def exit_cb(*exc):
pass
def other_cb(*a, **kw):
pass
@contextmanager
def inner_context():
stack = ExitStack()
with stack:
stack.enter_context(null_context())
stack.push(exit_cb)
stack.callback(other_cb, 10, "hi", answer=42)
yield
@types.coroutine
def async_yield(value):
return (yield value)
null_mgr = null_context()
with null_mgr:
if hasattr(null_mgr, "func"):
null_context_repr = "asynctb._tests.test_traceback.null_context()"
else:
null_context_repr = "null_context(...)"
del null_mgr
# There's some logic in the traceback extraction of running code that
# behaves differently when it's run in a non-main greenlet on CPython,
# because we have to stitch together the traceback portions from
# different greenlets. To exercise it, we'll run some tests in a
# non-main greenlet as well as at top level.
try:
import greenlet # type: ignore
except ImportError:
def try_in_other_greenlet_too(fn):
return fn
else:
def try_in_other_greenlet_too(fn):
def try_both():
fn()
greenlet.greenlet(fn).switch()
return try_both
def frames_from_inner_context(caller):
return [
(
caller,
"with inner_context() as inner:",
"inner",
"_GeneratorContextManager",
),
("inner_context", "with stack:", "stack", "ExitStack"),
(
"inner_context",
f"# stack.enter_context({null_context_repr})",
"stack[0]",
"_GeneratorContextManager",
),
("null_context", "yield", None, None),
(
"inner_context",
"# stack.push(asynctb._tests.test_traceback.exit_cb)",
"stack[1]",
None,
),
(
"inner_context",
"# stack.callback(asynctb._tests.test_traceback.other_cb, 10, 'hi', answer=42)",
"stack[2]",
None,
),
("inner_context", "yield", None, None),
]
def frames_from_outer_context(caller):
return [
(caller, "with outer_context():", None, "_GeneratorContextManager"),
*frames_from_inner_context("outer_context"),
("outer_context", "yield", None, None),
]
@try_in_other_greenlet_too
def test_running():
# These two layers of indirection are mostly to test that skip_callees
# works when using iterate_running.
@customize(skip_frame=True, skip_callees=True)
def call_call_traceback_since(root):
return call_traceback_since(root)
def call_traceback_since(root):
return Traceback.since(root)
def sync_example(root):
with outer_context():
if isinstance(root, types.FrameType):
return call_call_traceback_since(root)
else:
return Traceback.of(root)
# Currently running in this thread
assert_tb_matches(
sync_example(sys._getframe(0)),
[
("test_running", "sync_example(sys._getframe(0)),", None, None),
*frames_from_outer_context("sync_example"),
("sync_example", "return call_call_traceback_since(root)", None, None),
],
)
async def async_example():
root = await async_yield(None)
await async_yield(sync_example(root))
def generator_example():
root = yield
yield sync_example(root)
async def agen_example():
root = yield
yield sync_example(root)
for which in (async_example, generator_example, agen_example):
it = which()
if which is agen_example:
def send(val):
with pytest.raises(StopIteration) as info:
it.asend(val).send(None)
return info.value.value
else:
send = it.send
send(None)
if which is async_example:
line = "await async_yield(sync_example(root))"
else:
line = "yield sync_example(root)"
assert_tb_matches(
send(it),
[
(which.__name__, line, None, None),
*frames_from_outer_context("sync_example"),
("sync_example", "return Traceback.of(root)", None, None),
],
)
def test_suspended():
async def async_example(depth):
if depth >= 1:
return await async_example(depth - 1)
with outer_context():
return await async_yield(1)
async def agen_example(depth):
await async_example(depth)
yield # pragma: no cover
agen_makers = [agen_example]
try:
import async_generator
except ImportError:
agen_backport_example = None
else:
@async_generator.async_generator
async def agen_backport_example(depth):
await async_example(depth)
await async_generator.yield_() # pragma: no cover
agen_makers.append(agen_backport_example)
# Suspended coroutine
coro = async_example(3)
assert coro.send(None) == 1
assert_tb_matches(
Traceback.of(coro),
[
("async_example", "return await async_example(depth - 1)", None, None),
("async_example", "return await async_example(depth - 1)", None, None),
("async_example", "return await async_example(depth - 1)", None, None),
*frames_from_outer_context("async_example"),
("async_example", "return await async_yield(1)", None, None),
("async_yield", "return (yield value)", None, None),
],
)
assert_tb_matches(
Traceback.of(coro, with_context_info=False),
[
("async_example", "return await async_example(depth - 1)", None, None),
("async_example", "return await async_example(depth - 1)", None, None),
("async_example", "return await async_example(depth - 1)", None, None),
("async_example", "return await async_yield(1)", None, None),
("async_yield", "return (yield value)", None, None),
],
)
with pytest.raises(StopIteration, match="42"):
coro.send(42)
# Suspended async generator
for thing in agen_makers:
agi = thing(3)
ags = agi.asend(None)
assert ags.send(None) == 1
for view in (agi, ags):
assert_tb_matches(
Traceback.of(view, with_context_info=False),
[
(thing.__name__, "await async_example(depth)", None, None),
(
"async_example",
"return await async_example(depth - 1)",
None,
None,
),
(
"async_example",
"return await async_example(depth - 1)",
None,
None,
),
(
"async_example",
"return await async_example(depth - 1)",
None,
None,
),
("async_example", "return await async_yield(1)", None, None),
("async_yield", "return (yield value)", None, None),
],
)
# Exhausted coro/generator has no traceback
assert_tb_matches(Traceback.of(coro), [])
def test_greenlet():
greenlet = pytest.importorskip("greenlet")
tb_main = Traceback.of(greenlet.getcurrent())
assert tb_main.error is None and tb_main.frames[-1].funcname == "test_greenlet"
def outer():
with outer_context():
return inner()
def inner():
# Test getting the traceback of a greenlet from inside it
assert_tb_matches(
Traceback.of(gr),
[
*frames_from_outer_context("outer"),
("outer", "return inner()", None, None),
("inner", "Traceback.of(gr),", None, None),
],
)
return greenlet.getcurrent().parent.switch(1)
gr = greenlet.greenlet(outer)
assert_tb_matches(Traceback.of(gr), []) # not started -> empty tb
assert 1 == gr.switch()
assert_tb_matches(
Traceback.of(gr),
[
*frames_from_outer_context("outer"),
("outer", "return inner()", None, None),
("inner", "return greenlet.getcurrent().parent.switch(1)", None, None),
],
)
assert 2 == gr.switch(2)
assert_tb_matches(Traceback.of(gr), []) # dead -> empty tb
# Test tracing into the runner for a dead greenlet
def trivial_runner(gr):
assert_tb_matches(
Traceback.since(sys._getframe(0)),
[("trivial_runner", "Traceback.since(sys._getframe(0)),", None, None)],
)
@register_get_target(trivial_runner)
def get_target(frame, is_terminal):
return frame.f_locals.get("gr")
trivial_runner(gr)
def test_get_target_fails():
outer_frame = sys._getframe(0)
def inner():
return Traceback.since(outer_frame)
@customize(get_target=lambda *args: {}["wheee"])
def example():
return inner()
# Frames that produce an error get mentioned in the traceback,
# even if they'd otherwise be skipped
@customize(skip_frame=True, get_target=lambda *args: {}["wheee"])
def skippy_example():
return inner()
for fn in (example, skippy_example):
assert_tb_matches(
fn(),
[
("test_get_target_fails", "fn(),", None, None),
(fn.__name__, "return inner()", None, None),
],
error=KeyError("wheee"),
)
@pytest.mark.skipif(
sys.implementation.name == "pypy",
reason="https://foss.heptapod.net/pypy/pypy/-/blob/branch/py3.6/lib_pypy/greenlet.py#L124",
)
def test_greenlet_in_other_thread():
greenlet = pytest.importorskip("greenlet")
ready_evt = threading.Event()
done_evt = threading.Event()
gr = None
def thread_fn():
def target():
ready_evt.set()
done_evt.wait()
nonlocal gr
gr = greenlet.greenlet(target)
gr.switch()
threading.Thread(target=thread_fn).start()
ready_evt.wait()
assert_tb_matches(
Traceback.of(gr),
[],
error=RuntimeError(
"Traceback.of(greenlet) can't handle a greenlet running in another thread"
),
)
done_evt.set()
def test_exiting():
# Test traceback when a synchronous context manager is currently exiting.
result: Traceback
@contextmanager
def capture_tb_on_exit(coro):
with inner_context() as inner: # noqa: F841
try:
yield
finally:
nonlocal result
result = Traceback.of(coro)
async def async_capture_tb():
coro = await async_yield(None)
with capture_tb_on_exit(coro):
pass
await async_yield(result)
coro = async_capture_tb()
coro.send(None)
assert_tb_matches(
coro.send(coro),
[
(
"async_capture_tb",
"with capture_tb_on_exit(coro):",
None,
"_GeneratorContextManager",
),
("async_capture_tb", "pass", None, None),
("__exit__", "next(self.gen)", None, None),
*frames_from_inner_context("capture_tb_on_exit"),
("capture_tb_on_exit", "result = Traceback.of(coro)", None, None),
],
)
# Test traceback when an async CM is suspended in __aexit__. The
# definition of __aexit__ as a staticmethod is to foil the logic
# for figuring out which context manager is exiting.
class SillyAsyncCM:
async def __aenter__(self):
pass
@staticmethod
async def __aexit__(*stuff):
await async_yield(None)
async def yield_when_async_cm_exiting():
async with SillyAsyncCM():
pass
coro = yield_when_async_cm_exiting()
coro.send(None)
assert_tb_matches(
Traceback.of(coro),
[
("yield_when_async_cm_exiting", "async with SillyAsyncCM():", None, None),
("yield_when_async_cm_exiting", "pass", None, None),
("__aexit__", "await async_yield(None)", None, None),
("async_yield", "return (yield value)", None, None),
],
)
def test_errors():
with pytest.raises(TypeError, match="must be a frame"):
Traceback.since(42)
with pytest.raises(TypeError, match="must be a frame or integer"):
Traceback.until(sys._getframe(0), limit=2.4)
with pytest.raises(RuntimeError, match="is not an indirect caller of"):
Traceback.until(sys._getframe(1), limit=sys._getframe(0))
@try_in_other_greenlet_too
def test_traceback_until():
outer = sys._getframe(0)
def example():
inner = sys._getframe(0)
def get_tb(limit):
return Traceback.until(inner, limit=limit)
tb1, tb2, tb3 = [get_tb(lim) for lim in (1, outer, None)]
assert tb1 == tb2
assert tb3.frames[-len(tb1) :] == tb1.frames
assert_tb_matches(
tb1,
[
("test_traceback_until", "example()", None, None),
(
"example",
"tb1, tb2, tb3 = [get_tb(lim) for lim in (1, outer, None)]",
None,
None,
),
],
)
example()
@try_in_other_greenlet_too
def test_running_in_thread():
def thread_example(arrived_evt, depart_evt):
with outer_context():
arrived_evt.set()
depart_evt.wait()
def thread_caller(*args):
thread_example(*args)
# Currently running in other thread
for cooked in (False, True):
arrived_evt = threading.Event()
depart_evt = threading.Event()
thread = threading.Thread(target=thread_caller, args=(arrived_evt, depart_evt))
thread.start()
try:
3)
>>> v = torch.randn(150, 1)
>>> out = k.mmv(X1, X2, v, out=None)
>>> out.shape
torch.Size([100, 1])
"""
X1, X2, v, out = self._check_mmv_dimensions(X1, X2, v, out)
self._check_device_properties(X1, X2, v, out, fn_name="mmv", opt=opt)
params = self.params
if opt is not None:
params = dataclasses.replace(self.params, **dataclasses.asdict(opt))
mmv_impl = self._decide_mmv_impl(X1, X2, v, params)
return mmv_impl(X1, X2, v, self, out, params)
def _decide_mmv_impl(self, X1, X2, v, opt: FalkonOptions):
"""Choose which `mmv` function to use for this data.
Note that `mmv` functions compute the kernel-vector product
Parameters
----------
X1 : torch.Tensor
First data matrix, of shape (N x D)
X2 : torch.Tensor
Second data matrix, of shape (M x D)
v : torch.Tensor
Vector for the matrix-vector multiplication (M x T)
opt : FalkonOptions
Falkon options. Options may be specified to force GPU or CPU usage.
Returns
-------
mmv_fn
A function which allows to perform the `mmv` operation.
Notes
-----
This function decides based on the inputs: if the inputs are sparse, it will choose
the sparse implementations; if CUDA is detected, it will choose the CUDA implementation;
otherwise it will simply choose the basic CPU implementation.
"""
use_cuda = decide_cuda(opt)
sparsity = check_sparse(X1, X2)
if not all(sparsity) and any(sparsity):
raise ValueError("Either all or none of 'X1', 'X2' must be sparse.")
if (X1.device.type == 'cuda') and (not use_cuda):
warnings.warn("kernel-vector product backend was chosen to be CPU, but GPU input "
"tensors found. Defaulting to use the GPU (note this may "
"cause issues later). To force usage of the CPU backend, "
"please pass CPU tensors; to avoid this warning if the GPU backend is "
"desired, check your options (i.e. set 'use_cpu=False').")
use_cuda = True
sparsity = all(sparsity)
if use_cuda:
from falkon.mmv_ops.fmmv_cuda import fmmv_cuda, fmmv_cuda_sparse
if sparsity:
return fmmv_cuda_sparse
else:
return fmmv_cuda
else:
if sparsity:
return fmmv_cpu_sparse
else:
return fmmv_cpu
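# Hedged usage sketch: forcing a specific backend through the options object
# (assumes the standard FalkonOptions fields; purely illustrative):
#
#   opt = FalkonOptions(use_cpu=True)
#   out = kernel.mmv(X1, X2, v, opt=opt)   # _decide_mmv_impl then selects fmmv_cpu for dense CPU tensors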
def dmmv(self, X1, X2, v, w, out=None, opt: Optional[FalkonOptions] = None):
# noinspection PyShadowingNames
"""Compute double matrix-vector multiplications where the matrix is the current kernel.
The general form of `dmmv` operations is: `Kernel(X2, X1) @ (Kernel(X1, X2) @ v + w)`
where if `v` is None, then we simply have `Kernel(X2, X1) @ w` and if `w` is None
we remove the additive factor.
**At least one of `w` and `v` must be provided**.
Parameters
----------
X1 : torch.Tensor
The first data-matrix for computing the kernel. Of shape (N x D):
N samples in D dimensions.
X2 : torch.Tensor
The second data-matrix for computing the kernel. Of shape (M x D):
M samples in D dimensions. Set `X2 == X1` to compute a symmetric kernel.
v : torch.Tensor or None
A vector to compute the matrix-vector product. This may also be a matrix of shape
(M x T), but if `T` is very large the operations will be much slower.
w : torch.Tensor or None
A vector to compute matrix-vector products. This may also be a matrix of shape
(N x T) but if `T` is very large the operations will be much slower.
out : torch.Tensor or None
Optional tensor of shape (M x T) to hold the output. If not provided it will
be created.
opt : Optional[FalkonOptions]
Options to be used for computing the operation. Useful are the memory size options
and CUDA options.
Returns
-------
out : torch.Tensor
The (M x T) output.
Examples
--------
>>> k = falkon.kernels.GaussianKernel(sigma=2) # You can substitute the Gaussian kernel by any other.
>>> X1 = torch.randn(100, 3) # N is 100, D is 3
>>> X2 = torch.randn(150, 3) # M is 150
>>> v = torch.randn(150, 1)
>>> w = torch.randn(100, 1)
>>> out = k.dmmv(X1, X2, v, w, out=None)
>>> out.shape
torch.Size([150, 1])
"""
X1, X2, v, w, out = self._check_dmmv_dimensions(X1, X2, v, w, out)
self._check_device_properties(X1, X2, v, w, out, fn_name="dmmv", opt=opt)
params = self.params
if opt is not None:
params = dataclasses.replace(self.params, **dataclasses.asdict(opt))
dmmv_impl = self._decide_dmmv_impl(X1, X2, v, w, params)
return dmmv_impl(X1, X2, v, w, self, out, params)
def _decide_dmmv_impl(self, X1, X2, v, w, opt: FalkonOptions):
"""Choose which `dmmv` function to use for this data.
Note that `dmmv` functions compute double kernel-vector products (see :meth:`dmmv` for
an explanation of what they are).
Parameters
----------
X1 : torch.Tensor
First data matrix, of shape (N x D)
X2 : torch.Tensor
Second data matrix, of shape (M x D)
v : torch.Tensor or None
Vector for the matrix-vector multiplication (M x T)
w : torch.Tensor or None
Vector for the matrix-vector multiplication (N x T)
opt : FalkonOptions
Falkon options. Options may be specified to force GPU or CPU usage.
Returns
-------
dmmv_fn
A function which allows to perform the `mmv` operation.
Notes
-----
This function decides based on the inputs: if the inputs are sparse, it will choose
the sparse implementations; if CUDA is detected, it will choose the CUDA implementation;
otherwise it will simply choose the basic CPU implementation.
"""
use_cuda = decide_cuda(opt)
sparsity = check_sparse(X1, X2)
if not all(sparsity) and any(sparsity):
raise ValueError("Either all or none of 'X1', 'X2' must be sparse.")
if (X1.device.type == 'cuda') and (not use_cuda):
warnings.warn("kernel-vector double product backend was chosen to be CPU, but GPU "
"input tensors found. Defaulting to use the GPU (note this may "
"cause issues later). To force usage of the CPU backend, "
"please pass CPU tensors; to avoid this warning if the GPU backend is "
"desired, check your options (i.e. set 'use_cpu=False').")
use_cuda = True
sparsity = all(sparsity)
if use_cuda:
from falkon.mmv_ops.fmmv_cuda import fdmmv_cuda, fdmmv_cuda_sparse
if sparsity:
return fdmmv_cuda_sparse
else:
return fdmmv_cuda
else:
if sparsity:
return fdmmv_cpu_sparse
else:
return fdmmv_cpu
@abstractmethod
def _prepare(self, X1, X2) -> Any:
"""Pre-processing operations necessary to compute the kernel.
This function will be called with two blocks of data which may be subsampled on the
first dimension (i.e. X1 may be of size `n x D` where `n << N`). The function should
not modify `X1` and `X2`. If necessary, it may return some data which is then made available
to the :meth:`_finalize` method.
For example, in the Gaussian kernel, this method is used to compute the squared norms
of the datasets.
Parameters
----------
X1 : torch.Tensor
(n x D) tensor. It is a block of the `X1` input matrix, possibly subsampled in the
first dimension.
X2 : torch.Tensor
(m x D) tensor. It is a block of the `X2` input matrix, possibly subsampled in the
first dimension.
Returns
-------
Data which may be used for the :meth:`_finalize` method. If no such information is
necessary, returns None.
"""
pass
@abstractmethod
def _apply(self, X1, X2, out) -> None:
"""Main kernel operation, usually matrix multiplication.
This function will be called with two blocks of data which may be subsampled on the
first and second dimension (i.e. X1 may be of size `n x d` where `n << N` and `d << D`).
The output shall be stored in the `out` argument, and not be returned.
Parameters
----------
X1 : torch.Tensor
(n x d) tensor. It is a block of the `X1` input matrix, possibly subsampled in the
first dimension.
X2 : torch.Tensor
(m x d) tensor. It is a block of the `X2` input matrix, possibly subsampled in the
first dimension.
out : torch.Tensor
(n x m) tensor. A tensor in which the output of the operation shall be accumulated.
This tensor is initialized to 0 before calling `_apply`, but in case of subsampling
of the data along the second dimension, multiple calls will be needed to compute a
single (n x m) output block. In such case, the first call to this method will have
a zeroed tensor, while subsequent calls will simply reuse the same object.
"""
pass
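# Illustrative sketch (not Falkon's actual GaussianKernel) of how a subclass could
# fill in the three hooks for a squared-distance based kernel; the base class name,
# ToySquaredExpKernel and self.sigma are assumptions for the example:
#
#   class ToySquaredExpKernel(Kernel):
#       def _prepare(self, X1, X2):
#           # per-row squared norms, reused by _finalize
#           return X1.pow(2).sum(-1), X2.pow(2).sum(-1)
#
#       def _apply(self, X1, X2, out):
#           # accumulate the cross term -2 * X1 @ X2.T into the output block
#           out.addmm_(X1, X2.T, alpha=-2.0)
#
#       def _finalize(self, A, d):
#           sq1, sq2 = d
#           A.add_(sq1.unsqueeze(1)).add_(sq2.unsqueeze(0))   # full squared distances
#           return A.div_(-2 * self.sigma ** 2).exp_()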
@abstractmethod
def _finalize(self, A, d):
"""Final actions to be performed on a partial kernel matrix.
All elementwise operations on the kernel matrix should
v] + h4_accelerated[j, v] + h5_accelerated[j, v] + h6_accelerated[j, v] + h7_accelerated[j, v] == j.levels() + 1, name="constrK13")
# model.addConstr(h0_accelerated[j, v] + h1_accelerated[j, v] + h2_accelerated[j, v] + h3_accelerated[j, v] + h4_accelerated[j, v] + h5_accelerated[j, v] == 4, name="constrK7")
model.addConstr(boundary_helper_accelerated[j, v, 0] == and_(h0_accelerated[j, v], h1_accelerated[j, v]), name="constrK9")
model.addConstr(boundary_helper_accelerated[j, v, 1] == and_(h2_accelerated[j, v], h3_accelerated[j, v]), name="constrK10")
model.addConstr(boundary_helper_accelerated[j, v, 2] == and_(h4_accelerated[j, v], h5_accelerated[j, v]), name="constrK11")
model.addConstr(boundary_helper_accelerated[j, v, 3] == and_(h6_accelerated[j, v], h7_accelerated[j, v]), name="constrK12")
# GPU resource demand calculation among different options for different load levels
model.addConstr(gpu_req[j, v] + BIG_M * (1 - boundary_helper_gpu[j, v, 0]) >= j.gpu_req_inrange(in_vector, 0)
- (1 - as_accelerated[j, v]) * j.constant_factor_gpu_inrange(0),
name = "constrDemandGpu_%s_%s" % (j, v))
model.addConstr(gpu_req[j, v] + BIG_M * (1 - boundary_helper_gpu[j, v, 1]) >= j.gpu_req_inrange(in_vector, 1)
- (1 - as_accelerated[j, v]) * j.constant_factor_gpu_inrange(1),
name = "constrDemandGpu_%s_%s" % (j, v))
model.addConstr(gpu_req[j, v] + BIG_M * (1 - boundary_helper_gpu[j, v, 2]) >= j.gpu_req_inrange(in_vector, 2)
- (1 - as_accelerated[j, v]) * j.constant_factor_gpu_inrange(2),
name = "constrDemandGpu_%s_%s" % (j, v))
model.addConstr(gpu_req[j, v] + BIG_M * (1 - boundary_helper_gpu[j, v, 3]) >= j.gpu_req_inrange(in_vector, 3)
- (1 - as_accelerated[j, v]) * j.constant_factor_gpu_inrange(3),
name = "constrDemandGpu_%s_%s" % (j, v))
model.addConstr(j.boundary["accelerated"][0][1] - component_input_sum[j,v] + 0.001 <= BIG_M * h0_gpu[j, v], name="constrL1")
model.addConstr(component_input_sum[j,v] - j.boundary["accelerated"][0][0] + 0.001 <= BIG_M * h1_gpu[j, v], name="constrL2")
model.addConstr(j.boundary["accelerated"][1][1] - component_input_sum[j,v] + 0.001 <= BIG_M * h2_gpu[j, v], name="constrL3")
model.addConstr(component_input_sum[j,v] - j.boundary["accelerated"][1][0] + 0.001 <= BIG_M * h3_gpu[j, v], name="constrL4")
model.addConstr(j.boundary["accelerated"][2][1] - component_input_sum[j,v] + 0.001 <= BIG_M * h4_gpu[j, v], name="constrL5")
model.addConstr(component_input_sum[j,v] - j.boundary["accelerated"][2][0] + 0.001 <= BIG_M * h5_gpu[j, v], name="constrL6")
model.addConstr(j.boundary["accelerated"][3][1] - component_input_sum[j,v] + 0.001 <= BIG_M * h6_gpu[j, v], name="constrL7")
model.addConstr(component_input_sum[j,v] - j.boundary["accelerated"][3][0] + 0.001 <= BIG_M * h7_gpu[j, v], name="constrL8")
model.addConstr(h0_gpu[j, v] + h1_gpu[j, v] + h2_gpu[j, v] + h3_gpu[j, v] + h4_gpu[j, v] + h5_gpu[j, v] + h6_gpu[j, v] + h7_gpu[j, v] == j.levels() + 1, name="constrL13")
# model.addConstr(h0_gpu[j, v] + h1_gpu[j, v] + h2_gpu[j, v] + h3_gpu[j, v] + h4_gpu[j, v] + h5_gpu[j, v] == 4, name="constrL7")
model.addConstr(boundary_helper_gpu[j, v, 0] == and_(h0_gpu[j, v], h1_gpu[j, v]), name="constrL9")
model.addConstr(boundary_helper_gpu[j, v, 1] == and_(h2_gpu[j, v], h3_gpu[j, v]), name="constrL10")
model.addConstr(boundary_helper_gpu[j, v, 2] == and_(h4_gpu[j, v], h5_gpu[j, v]), name="constrL11")
model.addConstr(boundary_helper_gpu[j, v, 3] == and_(h6_gpu[j, v], h7_gpu[j, v]), name="constrL12")
for j in components:
if not j.source:
for v in nodes.ids:
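# Standard big-M linearization of the product vm_cpu = as_vm * cpu_if_vm (binary times
# continuous): the three constraints below drive vm_cpu to 0 when as_vm == 0 and to
# cpu_if_vm when as_vm == 1; 10001 serves as the big-M upper bound. The same pattern
# is repeated for the container, accelerated and GPU variants.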
model.addConstr(vm_cpu[j, v] <= 10001 * as_vm[j, v], name="constrA1")
model.addConstr(vm_cpu[j, v] <= cpu_if_vm[j, v], name="constrA2")
model.addConstr(vm_cpu[j, v] >= cpu_if_vm[j, v] - 10001 * (1 - as_vm[j,v]), name="constrA3")
model.addConstr(container_cpu[j, v] <= 10001 * as_container[j, v], name="constrA4")
model.addConstr(container_cpu[j, v] <= cpu_if_container[j, v], name="constrA5")
model.addConstr(container_cpu[j, v] >= cpu_if_container[j, v] - 10001 * (1 - as_container[j,v]), name="constrA6")
model.addConstr(accelerated_cpu[j, v] <= 10001 * as_accelerated[j, v], name="constrA7")
model.addConstr(accelerated_cpu[j, v] <= cpu_if_accelerated[j, v], name="constrA8")
model.addConstr(accelerated_cpu[j, v] >= cpu_if_accelerated[j, v] - 10001 * (1 - as_accelerated[j,v]), name="constrA9")
model.addConstr(cpu_req[j, v] == vm_cpu[j, v]
+ container_cpu[j, v]
+ accelerated_cpu[j, v],
name = "constDemandCPU_%s_%s" % (j, v))
model.addConstr(gpu_req_final[j, v] <= 10001 * as_accelerated[j, v], name="constrA11")
model.addConstr(gpu_req_final[j, v] <= gpu_req[j, v], name="constrA12")
model.addConstr(gpu_req_final[j, v] >= gpu_req[j, v] - 10001 * (1 - as_accelerated[j,v]), name="constrA13")
# Respect node and link capacity constraints
for v in nodes.ids:
model.addConstr(quicksum(cpu_req[j, v] for j in components) <= nodes.cpu[v], name="constrCapCPU_%s" % (v)) # 14
model.addConstr(quicksum(gpu_req_final[j, v] for j in components) <= nodes.gpu[v], name="constrCapGPU_%s" % (v))
for l in links.ids:
model.addConstr(quicksum(link_dr[a, v1, v2, l] for a in arcs for v1 in nodes.ids for v2 in nodes.ids)
<= links.dr[l], name="constrCapLink") # 18
for v in nodes.ids:
model.addConstr(vm_time[j, v] == j.time["vm"] * component_input_sum[j, v] - (1 - as_vm[j, v]) * j.time["vm"] * component_input_sum[j, v], name="constrA14")
model.addConstr(vm_time[j, v] <= BIG_M * as_vm[j, v], name="constrA15")
model.addConstr(container_time[j, v] == j.time["container"] * component_input_sum[j, v] - (1 - as_container[j, v]) * j.time["container"] * component_input_sum[j, v], name="constrA16")
model.addConstr(container_time[j, v] <= BIG_M * as_container[j, v], name="constrA17")
model.addConstr(accelerated_time[j, v] == j.time["accelerated"] * component_input_sum[j, v] - (1 - as_accelerated[j, v]) * j.time["accelerated"] * component_input_sum[j, v], name="constrA18")
model.addConstr(accelerated_time[j, v] <= BIG_M * as_accelerated[j, v], name="constrA19")
# model.addConstr(vm_input[j, v] == component_input_sum[j, v] - (1 - as_vm[j, v]) * component_input_sum[j, v])
# # model.addConstr(vm_input[j, v] == component_input_sum[j, v] * as_vm[j, v])
# model.addConstr(vm_time[j, v] == vm_input[j, v] * j.time["vm"])
# # model.addConstr(cost_vm_cpu[j,v] == vm_time[j,v] * nodes.cpu_cost[v])
# model.addConstr(container_input[j, v] == component_input_sum[j, v] - (1 - as_container[j, v]) * component_input_sum[j, v])
# # model.addConstr(container_input[j, v] == component_input_sum[j, v] * as_container[j, v])
# model.addConstr(container_time[j, v] == container_input[j, v] * j.time["container"])
# # model.addConstr(cost_container_cpu[j,v] == container_time[j,v] * nodes.cpu_cost[v])
# model.addConstr(accelerated_input[j, v] == component_input_sum[j, v] - (1 - as_accelerated[j, v]) * component_input_sum[j, v])
# # model.addConstr(accelerated_input[j, v] == component_input_sum[j, v] * as_accelerated[j, v])
# model.addConstr(accelerated_time[j, v] == accelerated_input[j, v] * j.time["accelerated"])
# # model.addConstr(cost_accelerated_cpu[j,v] == accelerated_time[j,v] * nodes.cpu_cost[v])
# # model.addConstr(gpu_cost[j,v] == accelerated_time[j,v] * nodes.gpu_cost[v])
model.addConstr(processing_time[j, v] == vm_time[j, v] + container_time[j, v] + accelerated_time[j, v], name="constrA20")
# model.addConstr(cpu_cost[j,v] == cost_vm_cpu[j,v] + cost_container_cpu[j,v] + cost_accelerated_cpu[j,v])
for j in components:
model.addConstr(maxproctime[j] == max_([processing_time[j,v] for v in nodes.ids]), name="constrA21")
model.addConstr(proctime == quicksum(maxproctime[j] for j in components), name="constrA22")
# model.addConstr(proctime <= 0.09, name="constrA23")
# Calculate total cost of deployment
# model.addConstr(quicksum(cpu_req[j, v] * cpu_cost[j, v] + gpu_req_final[j, v] * gpu_cost[j, v] for j in components for v in nodes.ids) <= total_cost)
# model.addConstr(total_cost == quicksum(cpu_req[j, v] * cpu_cost[j, v] + gpu_req_final[j, v] * gpu_cost[j, v] for j in components for v in nodes.ids))
# model.addConstr(quicksum(cpu_req[j, v] * cpu_cost[j, v] + gpu_req_final[j, v] * gpu_cost[j, v] for j in components for v in nodes.ids) <= total_cost)
for j in components:
for v in nodes.ids:
# model.addConstr(total_cost[j, v] == processing_time[j, v] * cpu_req[j, v] * nodes.cpu_cost[v] + processing_time[j, v] * gpu_req_final[j, v] * nodes.gpu_cost[v])
model.addConstr(compute_cost[j, v] == cpu_req[j, v] * nodes.cpu_cost[v] + gpu_req_final[j, v] * nodes.gpu_cost[v], name="constrA24")
model.addConstr(quicksum(cpu_req[j, v] * nodes.cpu_cost[v] + gpu_req_final[j, v] * nodes.gpu_cost[v] for j in components for v in nodes.ids) == total_cost, name="constrA25")
model.addConstr(max_resource_cost == max_([compute_cost[j, v] for j in components for v in nodes.ids]))
model.addConstr(max_processing_time == max_([processing_time[j, v] for j in components for v in nodes.ids]))
####################
# OBJECTIVE
# cpu_cost = 1
# gpu_cost = 3
# model.setObjectiveN(quicksum(cpu_req[j, v] * processing_time[j, v] * nodes.cpu_cost[v]
# + gpu_req_final[j, v] * processing_time[j, v] * nodes.gpu_cost[v]
# for j in components for v in nodes.ids),
# index=0, priority=5, name="MinimizeTotalCost")
# model.setObjectiveN(quicksum(link_dr[a, v1, v2, l] for a in arcs for v1 in nodes.ids for v2 in nodes.ids for l in links.ids),
# index=1, priority=4, name="MinimizeNetworkDemand")
# model.setObjectiveN(quicksum(changed[j, v] for j in components for v in nodes.ids),
# index=2, priority=1, name="MinimizeChanged")
##
# model.setObjectiveN(proctime,
# index=1, priority=3, name="MinimizeProcessingTime")
# model.setObjectiveN(total_cost,
# index=2, priority=1, name="MinimizeComputeDemand")
# # model.setObjectiveN(quicksum(link_dr[a, v1, v2, l] for a in arcs for v1 in nodes.ids for v2 in nodes.ids for l in links.ids),
# # index=2, priority=3, name="MinimizeNetworkDemand")
# model.setObjectiveN(quicksum(changed[j, v] for j in components for v in nodes.ids),
# index=0, priority=5, name="MinimizeChanged")
##
# wchange = 1
# wdr = wchange + len(components) * len(nodes.ids)
# wcost = wdr + len(links.ids) * 500
# wtime = wcost + len(links.ids) * 500
# model.setObjective(wtime * max(processing_time[j, v] for j in components for v in nodes.ids)
# + wcost * max(compute_cost[j, v] for j in components for v in nodes.ids)
# + wdr * quicksum(link_dr[a, v1, v2, l] for a in arcs for v1 in nodes.ids for v2 in nodes.ids for l in links.ids)
# + wchange * quicksum(changed[j, v] for j in components for v in nodes.ids))
# model.setObjective(wcost * quicksum(processing_time[j, v] * compute_cost[j, v] for j in components for v in nodes.ids)
# + wdr * quicksum(link_dr[a, v1, v2, l] for a in arcs for v1 in nodes.ids for v2 in nodes.ids for l in links.ids)
# + wchange * quicksum(changed[j,
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
from pyqtgraph.Point import Point
import pyqtgraph.debug as debug
import weakref
import pyqtgraph.functions as fn
from GraphicsWidget import GraphicsWidget
__all__ = ['AxisItem']
class AxisItem(GraphicsWidget):
def __init__(self, orientation, pen=None, linkView=None, parent=None, maxTickLength=-5, showValues=True):
"""
GraphicsItem showing a single plot axis with ticks, values, and label.
Can be configured to fit on any side of a plot, and can automatically synchronize its displayed scale with ViewBox items.
Ticks can be extended to make a grid.
If maxTickLength is negative, ticks point into the plot.
"""
GraphicsWidget.__init__(self, parent)
self.label = QtGui.QGraphicsTextItem(self)
self.showValues = showValues
self.picture = None
self.orientation = orientation
if orientation not in ['left', 'right', 'top', 'bottom']:
raise Exception("Orientation argument must be one of 'left', 'right', 'top', or 'bottom'.")
if orientation in ['left', 'right']:
#self.setMinimumWidth(25)
#self.setSizePolicy(QtGui.QSizePolicy(
#QtGui.QSizePolicy.Minimum,
#QtGui.QSizePolicy.Expanding
#))
self.label.rotate(-90)
#else:
#self.setMinimumHeight(50)
#self.setSizePolicy(QtGui.QSizePolicy(
#QtGui.QSizePolicy.Expanding,
#QtGui.QSizePolicy.Minimum
#))
#self.drawLabel = False
self.labelText = ''
self.labelUnits = ''
self.labelUnitPrefix=''
self.labelStyle = {'color': '#CCC'}
self.logMode = False
self.textHeight = 18
self.tickLength = maxTickLength
self.scale = 1.0
self.autoScale = True
self.setRange(0, 1)
if pen is None:
pen = QtGui.QPen(QtGui.QColor(100, 100, 100))
self.setPen(pen)
self._linkedView = None
if linkView is not None:
self.linkToView(linkView)
self.showLabel(False)
self.grid = False
#self.setCacheMode(self.DeviceCoordinateCache)
def close(self):
self.scene().removeItem(self.label)
self.label = None
self.scene().removeItem(self)
def setGrid(self, grid):
"""Set the alpha value for the grid, or False to disable."""
self.grid = grid
self.picture = None
self.prepareGeometryChange()
self.update()
def setLogMode(self, log):
self.logMode = log
self.picture = None
self.update()
def resizeEvent(self, ev=None):
#s = self.size()
## Set the position of the label
nudge = 5
br = self.label.boundingRect()
p = QtCore.QPointF(0, 0)
if self.orientation == 'left':
p.setY(int(self.size().height()/2 + br.width()/2))
p.setX(-nudge)
#s.setWidth(10)
elif self.orientation == 'right':
#s.setWidth(10)
p.setY(int(self.size().height()/2 + br.width()/2))
p.setX(int(self.size().width()-br.height()+nudge))
elif self.orientation == 'top':
#s.setHeight(10)
p.setY(-nudge)
p.setX(int(self.size().width()/2. - br.width()/2.))
elif self.orientation == 'bottom':
p.setX(int(self.size().width()/2. - br.width()/2.))
#s.setHeight(10)
p.setY(int(self.size().height()-br.height()+nudge))
#self.label.resize(s)
self.label.setPos(p)
self.picture = None
def showLabel(self, show=True):
#self.drawLabel = show
self.label.setVisible(show)
if self.orientation in ['left', 'right']:
self.setWidth()
else:
self.setHeight()
if self.autoScale:
self.setScale()
def setLabel(self, text=None, units=None, unitPrefix=None, **args):
if text is not None:
self.labelText = text
self.showLabel()
if units is not None:
self.labelUnits = units
self.showLabel()
if unitPrefix is not None:
self.labelUnitPrefix = unitPrefix
if len(args) > 0:
self.labelStyle = args
self.label.setHtml(self.labelString())
self.resizeEvent()
self.picture = None
self.update()
def labelString(self):
if self.labelUnits == '':
if self.scale == 1.0:
units = ''
else:
units = u'(x%g)' % (1.0/self.scale)
else:
#print repr(self.labelUnitPrefix), repr(self.labelUnits)
units = u'(%s%s)' % (self.labelUnitPrefix, self.labelUnits)
s = u'%s %s' % (self.labelText, units)
style = ';'.join(['%s: "%s"' % (k, self.labelStyle[k]) for k in self.labelStyle])
return u"<span style='%s'>%s</span>" % (style, s)
def setHeight(self, h=None):
if h is None:
h = self.textHeight + max(0, self.tickLength)
if self.label.isVisible():
h += self.textHeight
self.setMaximumHeight(h)
self.setMinimumHeight(h)
self.picture = None
def setWidth(self, w=None):
if w is None:
w = max(0, self.tickLength) + 40
if self.label.isVisible():
w += self.textHeight
self.setMaximumWidth(w)
self.setMinimumWidth(w)
def setPen(self, pen):
self.pen = pen
self.picture = None
self.update()
def setScale(self, scale=None):
"""
Set the value scaling for this axis.
The scaling value 1) multiplies the values displayed along the axis
and 2) changes the way units are displayed in the label.
For example:
If the axis spans values from -0.1 to 0.1 and has units set to 'V'
then a scale of 1000 would cause the axis to display values -100 to 100
and the units would appear as 'mV'
If scale is None, then it will be determined automatically based on the current
range displayed by the axis.
"""
if scale is None:
#if self.drawLabel: ## If there is a label, then we are free to rescale the values
if self.label.isVisible():
d = self.range[1] - self.range[0]
#(scale, prefix) = fn.siScale(d / 2.)
(scale, prefix) = fn.siScale(max(abs(self.range[0]), abs(self.range[1])))
if self.labelUnits == '' and prefix in ['k', 'm']: ## If we are not showing units, wait until 1e6 before scaling.
scale = 1.0
prefix = ''
self.setLabel(unitPrefix=prefix)
else:
scale = 1.0
if scale != self.scale:
self.scale = scale
self.setLabel()
self.picture = None
self.update()
def setRange(self, mn, mx):
        if any(np.isnan([mn, mx])) or any(np.isinf([mn, mx])):
            raise Exception("Not setting range to [%s, %s]" % (str(mn), str(mx)))
self.range = [mn, mx]
if self.autoScale:
self.setScale()
self.picture = None
self.update()
def linkedView(self):
"""Return the ViewBox this axis is linked to"""
if self._linkedView is None:
return None
else:
return self._linkedView()
def linkToView(self, view):
oldView = self.linkedView()
self._linkedView = weakref.ref(view)
if self.orientation in ['right', 'left']:
if oldView is not None:
oldView.sigYRangeChanged.disconnect(self.linkedViewChanged)
view.sigYRangeChanged.connect(self.linkedViewChanged)
else:
if oldView is not None:
oldView.sigXRangeChanged.disconnect(self.linkedViewChanged)
view.sigXRangeChanged.connect(self.linkedViewChanged)
def linkedViewChanged(self, view, newRange):
self.setRange(*newRange)
def boundingRect(self):
linkedView = self.linkedView()
if linkedView is None or self.grid is False:
rect = self.mapRectFromParent(self.geometry())
## extend rect if ticks go in negative direction
if self.orientation == 'left':
rect.setRight(rect.right() - min(0,self.tickLength))
elif self.orientation == 'right':
rect.setLeft(rect.left() + min(0,self.tickLength))
elif self.orientation == 'top':
rect.setBottom(rect.bottom() - min(0,self.tickLength))
elif self.orientation == 'bottom':
rect.setTop(rect.top() + min(0,self.tickLength))
return rect
else:
return self.mapRectFromParent(self.geometry()) | linkedView.mapRectToItem(self, linkedView.boundingRect())
def paint(self, p, opt, widget):
if self.picture is None:
self.picture = QtGui.QPicture()
painter = QtGui.QPainter(self.picture)
try:
self.drawPicture(painter)
finally:
painter.end()
self.picture.play(p)
def tickSpacing(self, minVal, maxVal, size):
"""Return values describing the desired spacing and offset of ticks.
This method is called whenever the axis needs to be redrawn and is a
good method to override in subclasses that require control over tick locations.
The return value must be a list of three tuples:
[
(major tick spacing, offset),
(minor tick spacing, offset),
(sub-minor tick spacing, offset),
...
]
"""
dif = abs(maxVal - minVal)
if dif == 0:
return []
## decide optimal minor tick spacing in pixels (this is just aesthetics)
pixelSpacing = np.log(size+10) * 5
optimalTickCount = size / pixelSpacing
if optimalTickCount < 1:
optimalTickCount = 1
## optimal minor tick spacing
optimalSpacing = dif / optimalTickCount
## the largest power-of-10 spacing which is smaller than optimal
p10unit = 10 ** np.floor(np.log10(optimalSpacing))
## Determine major/minor tick spacings which flank the optimal spacing.
intervals = np.array([1., 2., 10., 20., 100.]) * p10unit
minorIndex = 0
while intervals[minorIndex+1] <= optimalSpacing:
minorIndex += 1
return [
(intervals[minorIndex+2], 0),
(intervals[minorIndex+1], 0),
(intervals[minorIndex], 0)
]
def tickValues(self, minVal, maxVal, size):
"""
Return the values and spacing of ticks to draw
[
(spacing, [major ticks]),
(spacing, [minor ticks]),
...
]
By default, this method calls tickSpacing to determine the correct tick locations.
This is a good method to override in subclasses.
"""
if self.logMode:
return self.logTickValues(minVal, maxVal, size)
ticks = []
tickLevels = self.tickSpacing(minVal, maxVal, size)
for i in range(len(tickLevels)):
spacing, offset = tickLevels[i]
## determine starting tick
start = (np.ceil((minVal-offset) / spacing) * spacing) + offset
## determine number of ticks
num = int((maxVal-start) / spacing) + 1
ticks.append((spacing, np.arange(num) * spacing + start))
return ticks
def logTickValues(self, minVal, maxVal, size):
v1 = int(np.floor(minVal))
v2 = int(np.ceil(maxVal))
major = range(v1+1, v2)
minor = []
for v in range(v1, v2):
minor.extend(v + np.log10(np.arange(1, 10)))
        minor = [x for x in minor if minVal < x < maxVal]
return [(1.0, major), (None, minor)]
def tickStrings(self, values, scale, spacing):
"""Return the strings that should be placed next to ticks. This method is called
when redrawing the axis and is a good method to override in subclasses.
The method is called with a list of tick values, a scaling factor (see below), and the
spacing between ticks (this is required since, in some instances, there may be only
one tick and thus no other way to determine the tick spacing)
The scale argument is used when the axis label is displaying units which may have an SI scaling prefix.
        When determining the text to display,
# Source file: dining_room/models/website.py (repo: GT-RAIL/isolation_cyoa)
import os
from django.conf import settings
from django.core.validators import MinValueValidator
from django.db import models
from django.contrib import auth
from django.contrib.auth.models import (AbstractBaseUser,
PermissionsMixin, BaseUserManager)
from django.contrib.auth.validators import ASCIIUsernameValidator
from django.utils import timezone
from django.utils.crypto import get_random_string, salted_hmac
from django.utils.translation import gettext_lazy as _
from .. import constants
from .domain import State, Transition, Suggestions
# Model for managing the study condition
class StudyManagement(models.Model):
"""
This model manages how new users are assigned to study conditions. Note:
the class should've been named StudyManager
"""
enabled_study_conditions = models.PositiveIntegerField(default=0, help_text="A bit vector for the study conditions that are enabled")
enabled_start_conditions = models.TextField(default="none", help_text="\\n separated start conditions")
number_per_condition = models.PositiveIntegerField(default=0, help_text="Number of people per combination of the conditions")
max_number_of_people = models.PositiveIntegerField(default=0, help_text="Maximum number of people to provision IDs for")
max_test_attempts = models.IntegerField(default=5, help_text="Maximum number of times a user can fail the knowledge test")
data_directory = models.CharField(max_length=50, help_text=f"Data directory for user data within '{os.path.join(settings.DROPBOX_ROOT_PATH, settings.DROPBOX_DATA_FOLDER)}'")
max_dx_suggestions = models.IntegerField(default=Suggestions.DEFAULT_MAX_DX_SUGGESTIONS, help_text="Max number of diagnosis suggestions to display", validators=[MinValueValidator(1)])
max_ax_suggestions = models.IntegerField(default=Suggestions.DEFAULT_MAX_AX_SUGGESTIONS, help_text="Max number of action suggestions to display", validators=[MinValueValidator(1)])
pad_suggestions = models.BooleanField(default=Suggestions.DEFAULT_PAD_SUGGESTIONS, help_text="Pad the suggestions if we don't have enough")
_enabled_study_conditions = _enabled_start_conditions = None
class Meta:
verbose_name = _('study management')
verbose_name_plural = _('study management')
def __str__(self):
return self.data_directory
@property
def enabled_start_conditions_list(self):
if self._enabled_start_conditions is None:
self._enabled_start_conditions = []
for condition in self.enabled_start_conditions.split('\n'):
if condition.strip() in User.StartConditions:
self._enabled_start_conditions.append(User.StartConditions(condition.strip()))
return self._enabled_start_conditions
@property
def enabled_study_conditions_list(self):
if self._enabled_study_conditions is None:
self._enabled_study_conditions = [x for x in User.StudyConditions if self.check_study_condition(x)]
return self._enabled_study_conditions
@property
def resolved_data_directory(self):
return os.path.join(settings.DROPBOX_DATA_FOLDER, self.data_directory)
@staticmethod
def get_default():
"""Get the default study management object that we shall be using. I
think this should be a 'manager', but it doesn't really matter now"""
return StudyManagement.objects.order_by('-pk')[0]
@staticmethod
def get_default_pk():
try:
return StudyManagement.get_default().pk
except Exception as e:
return None
@staticmethod
def convert_to_enabled_study_conditions(conditions):
"""Given a list of enabled study conditions, convert them to the int
value representing the ones that are enabled"""
value = 0
for condition in set(conditions):
value += (1 << (condition-1))
return value
def check_study_condition(self, condition):
"""Check that the condition, given by an int, is enabled"""
return (self.enabled_study_conditions & (1 << (condition-1))) > 0
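    # Worked example (hypothetical values): enabling study conditions [1, 3] gives
    # convert_to_enabled_study_conditions([1, 3]) == 0b101 == 5, and an instance
    # whose enabled_study_conditions is 5 answers check_study_condition(3) as True
    # because 5 & (1 << 2) == 4.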
def check_start_condition(self, condition):
"""Check that the condition, given by a string, is enabled"""
return condition in self.enabled_start_conditions_list
# Create the model for the user and the associated manager
class UserManager(models.Manager):
"""
A custom manager that removes the tight coupling to email that's present in
the base user manager.
"""
use_in_migrations = True
def make_random_password(self, length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789'):
"""
Generate a random password with the given length and given
allowed_chars. The default value of allowed_chars does not have "I" or
"O" or letters and digits that look similar -- just to avoid confusion.
"""
return get_random_string(length, allowed_chars)
def get_by_natural_key(self, username):
return self.get(**{self.model.USERNAME_FIELD: username})
def _create_user(self, username, unique_key, password, **extra_fields):
"""
Create and save a user with the given username, unique_key, and password
"""
if not username:
raise ValueError("Username must be set")
username = self.model.normalize_username(username)
user = self.model(username=username, unique_key=unique_key, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, unique_key, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
password = username
extra_fields.pop('password', None)
return self._create_user(username, unique_key, password, **extra_fields)
def create_superuser(self, username, password, **extra_fields):
""""""
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('study_condition', User.StudyConditions.DXAX_100)
extra_fields.setdefault('start_condition', User.StartConditions.AT_COUNTER_OCCLUDING_ABOVE_MUG)
if extra_fields.get('is_staff') is not True:
raise ValueError("Superuser must have is_staff=True")
if extra_fields.get('is_superuser') is not True:
raise ValueError("Superuser must have is_superuser=True")
unique_key = username
extra_fields.pop('unique_key', None)
return self._create_user(username, unique_key, password, **extra_fields)
def with_perm(self, perm, is_active=True, include_superusers=True, backend=None, obj=None):
"""Return a list of users with permissions"""
if backend is None:
backends = auth._get_backends(return_tuples=True)
assert len(backends) == 1, f"Expected 1 backend, got: {backends}"
backend, _ = backends[0]
elif not isinstance(backend, str):
raise TypeError(f"backend must be a dotted import path string. got {backend}")
else:
backend = auth.load_backend(backend)
if hasattr(backend, 'with_perm'):
return backend.with_perm(
perm,
is_active=is_active,
include_superusers=include_superusers,
obj=obj
)
return self.none()
class User(AbstractBaseUser, PermissionsMixin):
"""
The custom user class that we create. The changes from the default
definition are:
1. We have a specific unique_key instead of an email, that can be used by
MTurk workers to get compensation. We pretend the
2. Demographic information about the user, their responses to questions, the
condition, etc. are all recorded as part of the user field (to limit the
number of rows)
"""
# User Auth
username_validator = ASCIIUsernameValidator()
username = models.CharField(_('username'), max_length=30, unique=True, validators=[username_validator])
unique_key = models.CharField(_('unique_key'), max_length=30, unique=True)
# Permissions, etc.
is_staff = models.BooleanField(_('staff status'), default=False)
is_active = models.BooleanField(_('active'), default=True)
date_joined = models.DateTimeField(_('date joined'), auto_now_add=True) # Used in default User
date_modified = models.DateTimeField(_('date modified'), auto_now=True)
# Study condition tracking
class StudyConditions(models.IntegerChoices):
"""The study conditions"""
BASELINE = 1, _('Baseline')
# 100% correct
DX_100 = 2, _('DX, 100')
AX_100 = 3, _('AX, 100')
DXAX_100 = 4, _('DX & AX, 100')
# 90% correct
DX_90 = 5, _('DX, 90')
AX_90 = 6, _('AX, 90')
DXAX_90 = 7, _('DX & AX, 90')
# 80% correct
DX_80 = 8, _('DX, 80')
AX_80 = 9, _('AX, 80')
DXAX_80 = 10, _('DX & AX, 80')
# 70% correct
DX_70 = 11, _('DX, 70')
AX_70 = 12, _('AX, 70')
DXAX_70 = 13, _('DX & AX, 70')
# TODO: Add the actions with SA improvement conditions
__empty__ = _('(Unknown)')
study_condition = models.IntegerField(_('study condition'), blank=True, null=True, choices=StudyConditions.choices)
SHOW_DX_STUDY_CONDITIONS = [
StudyConditions.DX_100,
StudyConditions.DXAX_100,
StudyConditions.DX_90,
StudyConditions.DXAX_90,
StudyConditions.DX_80,
StudyConditions.DXAX_80,
StudyConditions.DX_70,
StudyConditions.DXAX_70,
]
SHOW_AX_STUDY_CONDITIONS = [
StudyConditions.AX_100,
StudyConditions.DXAX_100,
StudyConditions.AX_90,
StudyConditions.DXAX_90,
StudyConditions.AX_80,
StudyConditions.DXAX_80,
StudyConditions.AX_70,
StudyConditions.DXAX_70,
]
STUDY_CONDITIONS_NOISE_LEVELS = {
StudyConditions.DX_100: 0,
StudyConditions.AX_100: 0,
StudyConditions.DXAX_100: 0,
StudyConditions.DX_90: 0.1,
StudyConditions.AX_90: 0.1,
StudyConditions.DXAX_90: 0.1,
StudyConditions.DX_80: 0.2,
StudyConditions.AX_80: 0.2,
StudyConditions.DXAX_80: 0.2,
StudyConditions.DX_70: 0.3,
StudyConditions.AX_70: 0.3,
StudyConditions.DXAX_70: 0.3,
}
class StartConditions(models.TextChoices):
"""
The start conditions. Given by the state description in domain.State
"""
AT_COUNTER_ABOVE_MUG = 'kc.kc.default.above_mug.default.empty.dt'
AT_COUNTER_OCCLUDING = 'kc.kc.occluding.default.default.empty.dt'
AT_COUNTER_OCCLUDING_ABOVE_MUG = 'kc.kc.occluding.above_mug.default.empty.dt'
AT_COUNTER_MISLOCALIZED = 'dt.kc.default.default.default.empty.kc'
AT_TABLE = 'kc.dt.default.default.default.empty.dt'
AT_TABLE_ABOVE_MUG = 'kc.dt.default.above_mug.default.empty.dt'
AT_TABLE_OCCLUDING = 'kc.dt.occluding.default.default.empty.dt'
AT_TABLE_OCCLUDING_ABOVE_MUG = 'kc.dt.occluding.above_mug.default.empty.dt'
__empty__ = _('(Unknown)')
start_condition = models.CharField(_('start condition'), max_length=80, blank=True, null=True, choices=StartConditions.choices)
scenario_completed = models.BooleanField(_('scenario completed?'), blank=True, null=True, default=None)
date_started = models.DateTimeField(_('date started'), blank=True, null=True) # Starting the scenario
date_finished = models.DateTimeField(_('date finished'), blank=True, null=True) # Not necessarily completed the scenario
study_management = models.ForeignKey(StudyManagement, on_delete=models.SET_NULL, default=StudyManagement.get_default_pk, null=True, blank=True)
rng_state = models.IntegerField(default=Suggestions.DEFAULT_RNG_SEED)
# Demographics
class AgeGroups(models.IntegerChoices):
PREFER_NOT_TO_SAY = 0
BELOW_20 = 1, _("20 & below")
BTW_20_25 = 2, _("21 - 25")
BTW_25_30 = 3, _("26 - 30")
BTW_30_35 = 4, _("31 - 35")
BTW_35_40 = 5, _("36 - 40")
BTW_40_45 = 6, _("41 - 45")
BTW_45_50 = 7, _("46 - 50")
ABOVE_50 = 8, _("51 & over")
class Genders(models.TextChoices):
FEMALE = 'F'
MALE = 'M'
PREFER_NOT_TO_SAY = 'U', _("Other / Prefer Not to Say") # Unknown
class RobotExperienceGroups(models.IntegerChoices):
RARELY_OR_NEVER = 0
ONE_TO_THREE_TIMES_A_YEAR = 1, _("1 - 3 Times a Year")
MONTHLY = 2
WEEKLY = 3
DAILY = 4
amt_worker_id = models.CharField(_("Worker ID"), max_length=80, null=True, blank=True)
age_group = models.IntegerField(choices=AgeGroups.choices, blank=True, null=True)
gender = models.CharField(max_length=1, choices=Genders.choices, blank=True, null=True)
robot_experience = models.IntegerField(_("how often do you interact with robots?"), choices=RobotExperienceGroups.choices, blank=True, null=True)
date_demographics_completed = models.DateTimeField(_('date demographics completed'), blank=True, null=True)
# The knowledge review questions
supposed_to_grab_bowl = models.BooleanField(_("The robot's goal is to pick up the Bowl?"), blank=True, null=True)
supposed_to_go_to_couch = models.BooleanField(_("The robot's goal is to end up at the Couch?"), blank=True, null=True)
will_view_in_first_person = models.BooleanField(_("You will see a first-person view from the robot's camera?"), blank=True, null=True)
supposed_to_select_only_one_error = models.BooleanField(_("Even if there are multiple problems stopping the robot reaching its goal, you may only select one problem?"), blank=True, null=True)
actions_involve_invisible_arm_motion = models.BooleanField(_("Some actions might involve robot arm motions that are not visible on the camera?"), blank=True, null=True)
number_incorrect_knowledge_reviews = models.IntegerField(default=0)
ACCEPTABLE_REVIEW_ANSWERS = [
('supposed_to_grab_bowl', False),
('supposed_to_go_to_couch', True),
('will_view_in_first_person', True),
('supposed_to_select_only_one_error', False),
('actions_involve_invisible_arm_motion', True),
]
# Field to count the number of times the user has requested the state from
# the server. We start at -1 and increment every time `get_next_state_json`
# is invoked. This can then be used as the basis for injecting noise
number_state_requests = models.IntegerField(default=-1)
# Likert Responses
class LikertResponses(models.IntegerChoices):
STRONGLY_DISAGREE = 0
DISAGREE = 1
NEUTRAL = 2
AGREE = 3
STRONGLY_AGREE = 4
could_identify_problems = models.IntegerField(
_("I could always identify the problem(s) affecting the robot's ability to | |
from __future__ import annotations
from ..typecheck import *
from enum import IntEnum
from ..import core
from .import dap
from ..watch import Watch
from .debugger import Debugger
from .error import Error
from ..breakpoints import (
Breakpoints,
SourceBreakpoint,
)
from .variable import (
Variable,
SourceLocation,
)
from .configuration import (
AdapterConfiguration,
ConfigurationExpanded,
TaskExpanded
)
from .transport import TransportProtocol, TransportProtocolListener
class SessionListener (Protocol):
async def on_session_task_request(self, session: Session, task: TaskExpanded): ...
async def on_session_terminal_request(self, session: Session, request: dap.RunInTerminalRequestArguments) -> dap.RunInTerminalResponse: ...
def on_session_state_changed(self, session: Session, state: Session.State): ...
def on_session_selected_frame(self, session: Session, frame: Optional[dap.StackFrame]): ...
def on_session_output_event(self, session: Session, event: dap.OutputEvent): ...
def on_session_updated_modules(self, session: Session): ...
def on_session_updated_sources(self, session: Session): ...
def on_session_updated_variables(self, session: Session): ...
def on_session_updated_threads(self, session: Session): ...
class Session(TransportProtocolListener, core.Logger):
class State (IntEnum):
STARTING = 3
STOPPED = 0
STOPPING = 4
        # paused/running is based on the selected thread
PAUSED = 1
RUNNING = 2
stopped_reason_build_failed=0
stopped_reason_launch_error=1
stopped_reason_dispose=2
stopped_reason_cancel=3
stopped_reason_terminated_event=4
stopped_reason_manual=5
def __init__(self,
adapter_configuration: AdapterConfiguration,
configuration: ConfigurationExpanded,
restart: Any|None,
no_debug: bool,
breakpoints: Breakpoints,
watch: Watch,
listener: SessionListener,
transport_log: core.Logger,
debugger: Debugger,
parent: Session|None = None
) -> None:
self.adapter_configuration = adapter_configuration
self.configuration = configuration
self.restart = restart
self.no_debug = no_debug
self.listener = listener
self.children: list[Session] = []
self.parent = parent
self.debugger = debugger
if parent:
parent.children.append(self)
self.transport_log = transport_log
self.state_changed = core.Event[int]()
self.breakpoints = breakpoints
self.breakpoints_for_id: dict[int, SourceBreakpoint] = {}
self.breakpoints.data.on_send.add(self.on_send_data_breakpoints)
self.breakpoints.function.on_send.add(self.on_send_function_breakpoints)
self.breakpoints.filters.on_send.add(self.on_send_filters)
self.breakpoints.source.on_send.add(self.on_send_source_breakpoint)
self.watch = watch
self.watch.on_added.add(lambda expr: self.watch.evaluate_expression(self, expr))
self._transport: Optional[TransportProtocol] = None
self.launching_async: Optional[core.Future] = None
self.capabilities = dap.Capabilities()
self.stop_requested = False
self.launch_request = True
self._state = Session.State.STARTING
self._status = 'Starting'
self.disposeables: list[Any] = []
self.complete: core.Future[None] = core.Future()
self.threads_for_id: dict[int, Thread] = {}
self.all_threads_stopped = False
self.selected_explicitly = False
self.selected_thread = None
self.selected_frame = None
self.threads: list[Thread] = []
self.variables: list[Variable] = []
self.sources: dict[int|str, dap.Source] = {}
self.modules: dict[int|str, dap.Module] = {}
@property
def name(self) -> str:
return self.configuration.name
@property
def state(self) -> State:
return self._state
@state.setter
def state(self, state: State) -> None:
if self._state == state:
return
self._state = state
self.listener.on_session_state_changed(self, state)
@property
def status(self) -> str|None:
return self._status
def _change_status(self, status: str):
self._status = status
self.listener.on_session_state_changed(self, self._state)
async def launch(self) -> None:
try:
self.launching_async = core.run(self._launch())
await self.launching_async
except core.Error as e:
self.launching_async = None
core.exception(e)
            self.error('... an error occurred, ' + str(e))
await self.stop_forced(reason=Session.stopped_reason_launch_error)
except core.CancelledError:
...
self.launching_async = None
async def _launch(self) -> None:
assert self.state == Session.State.STOPPED, 'debugger not in stopped state?'
self.state = Session.State.STARTING
self.configuration = await self.adapter_configuration.configuration_resolve(self.configuration)
if not self.adapter_configuration.installed_version:
raise core.Error('Debug adapter with type name "{}" is not installed. You can install it by running Debugger: Install Adapters'.format(self.adapter_configuration.type))
if not await self.run_pre_debug_task():
self.info('Pre debug command failed, not starting session')
self.launching_async = None
await self.stop_forced(reason=Session.stopped_reason_build_failed)
return
self._change_status('Starting')
try:
transport = await self.adapter_configuration.start(log=self.transport_log, configuration=self.configuration)
except Exception as e:
raise core.Error(f'Unable to start the adapter process: {e}')
self._transport = TransportProtocol(
transport,
self,
self.transport_log
)
capabilities: dap.Capabilities = await self.request('initialize', {
'clientID': 'sublime',
'clientName': 'Sublime Text',
'adapterID': self.configuration.type,
'pathFormat': 'path',
'linesStartAt1': True,
'columnsStartAt1': True,
'supportsVariableType': True,
'supportsVariablePaging': False,
'supportsRunInTerminalRequest': True,
'supportsMemoryReferences': True,
'locale': 'en-us'
})
self.capabilities = capabilities
# remove/add any exception breakpoint filters
self.breakpoints.filters.update(capabilities.exceptionBreakpointFilters or [])
if self.restart:
self.configuration['__restart'] = self.restart
if self.no_debug:
self.configuration['noDebug'] = True
if self.configuration.request == 'launch':
self.launch_request = True
await self.request('launch', self.configuration)
elif self.configuration.request == 'attach':
self.launch_request = False
await self.request('attach', self.configuration)
else:
raise core.Error('expected configuration to have request of either "launch" or "attach" found {}'.format(self.configuration.request))
self.adapter_configuration.did_start_debugging(self)
# get the baseline threads after launch/attach
# according to https://microsoft.github.io/debug-adapter-protocol/overview
self.refresh_threads()
# At this point we are running?
self._change_status('Running')
self.state = Session.State.RUNNING
async def request(self, command: str, arguments: Any) -> Any:
if not self._transport:
raise core.Error(f'Debug Session {self.status}')
return await self._transport.send_request_asyc(command, arguments)
async def wait(self) -> None:
await self.complete
async def run_pre_debug_task(self) -> bool:
pre_debug_command = self.configuration.pre_debug_task
if pre_debug_command:
self._change_status('Running pre debug command')
r = await self.run_task('Pre debug command', pre_debug_command)
return r
return True
async def run_post_debug_task(self) -> bool:
post_debug_command = self.configuration.post_debug_task
if post_debug_command:
self._change_status('Running post debug command')
r = await self.run_task('Post debug command', post_debug_command)
return r
return True
async def run_task(self, name: str, task: TaskExpanded) -> bool:
try:
await self.listener.on_session_task_request(self, task)
return True
except core.CancelledError:
self.error(f'{name}: cancelled')
return False
except Exception as e:
core.exception()
self.error(f'{name}: {e}')
return False
def _refresh_state(self) -> None:
try:
thread = self.command_thread
if thread.stopped:
self._change_status('Paused')
self.state = Session.State.PAUSED
else:
self._change_status('Running')
self.state = Session.State.RUNNING
except core.Error as e:
self.state = Session.State.RUNNING
async def add_breakpoints(self) -> None:
assert self._transport
requests: list[Awaitable[Any]] = []
requests.append(self.set_exception_breakpoint_filters())
requests.append(self.set_function_breakpoints())
for file, filebreaks in self.breakpoints.source.breakpoints_per_file().items():
requests.append(self.set_breakpoints_for_file(file, filebreaks))
if self.capabilities.supportsDataBreakpoints:
requests.append(self.set_data_breakpoints())
if requests:
await core.wait(requests)
async def set_exception_breakpoint_filters(self) -> None:
if not self._transport:
return
filters: list[str] = []
filterOptions: list[dap.ExceptionFilterOptions] = []
for f in self.breakpoints.filters:
if f.enabled:
filters.append(f.dap.filter)
filterOptions.append(dap.ExceptionFilterOptions(
f.dap.filter,
f.condition,
))
await self.request('setExceptionBreakpoints', {
'filters': filters,
'filterOptions': filterOptions
})
async def set_function_breakpoints(self) -> None:
if not self._transport:
return
breakpoints = list(filter(lambda b: b.enabled, self.breakpoints.function))
if not self.capabilities.supportsFunctionBreakpoints:
# only show error message if the user tried to set a function breakpoint when they are not supported
if breakpoints:
self.error('This debugger does not support function breakpoints')
return
dap_breakpoints = list(map(lambda b: b.dap, breakpoints))
response = await self.request('setFunctionBreakpoints', {
'breakpoints': dap_breakpoints
})
results: list[dap.Breakpoint] = response['breakpoints']
for result, b in zip(results, breakpoints):
self.breakpoints.function.set_result(b, result)
async def set_data_breakpoints(self) -> None:
if not self._transport:
return
breakpoints = list(filter(lambda b: b.enabled, self.breakpoints.data))
dap_breakpoints = list(map(lambda b: b.dap, breakpoints))
response = await self.request('setDataBreakpoints', {
'breakpoints': dap_breakpoints
})
results: list[dap.Breakpoint] = response['breakpoints']
for result, b in zip(results, breakpoints):
self.breakpoints.data.set_result(b, result)
async def set_breakpoints_for_file(self, file: str, breakpoints: list[SourceBreakpoint]) -> None:
if not self._transport:
return
enabled_breakpoints: list[SourceBreakpoint] = []
dap_breakpoints: list[dap.SourceBreakpoint] = []
for breakpoint in breakpoints:
if breakpoint.dap.hitCondition and not self.capabilities.supportsHitConditionalBreakpoints:
self.error('This debugger does not support hit condition breakpoints')
if breakpoint.dap.logMessage and not self.capabilities.supportsLogPoints:
self.error('This debugger does not support log points')
if breakpoint.dap.condition and not self.capabilities.supportsConditionalBreakpoints:
self.error('This debugger does not support conditional breakpoints')
if breakpoint.enabled:
enabled_breakpoints.append(breakpoint)
dap_breakpoints.append(breakpoint.dap)
try:
response = await self.request('setBreakpoints', {
'source': { 'path': file },
'breakpoints': dap_breakpoints
})
results: list[dap.Breakpoint] = response['breakpoints']
if len(results) != len(enabled_breakpoints):
raise Error('expected #breakpoints to match results')
for result, b in zip(results, enabled_breakpoints):
self.breakpoints.source.set_result(b, result)
if result.id:
self.breakpoints_for_id[result.id] = b
except Error as e:
for b in enabled_breakpoints:
self.breakpoints.source.set_result(b, dap.Breakpoint())
def on_send_data_breakpoints(self, any: Any):
core.run(self.set_data_breakpoints())
def on_send_function_breakpoints(self, any: Any):
core.run(self.set_function_breakpoints())
def on_send_filters(self, any: Any):
core.run(self.set_exception_breakpoint_filters())
def on_send_source_breakpoint(self, breakpoint: SourceBreakpoint) -> None:
file = breakpoint.file
core.run(self.set_breakpoints_for_file(file, self.breakpoints.source.breakpoints_for_file(file)))
async def stop(self):
# this seems to be what the spec says to do in the overview
# https://microsoft.github.io/debug-adapter-protocol/overview
# haven't started session yet
if self._transport is None:
await self.stop_forced(reason=Session.stopped_reason_manual)
return
        # If stop is called multiple times, we call disconnect to forcefully end the session
if self.stop_requested:
await self.stop_forced(reason=Session.stopped_reason_manual)
return
self._change_status('Stop Requested')
self.stop_requested = True
# first try to terminate if we can
if self.launch_request and self.capabilities.supportsTerminateRequest:
try:
await self.request('terminate', {
'restart': False
})
return
except Error as e:
core.exception()
        # we couldn't terminate: either this is not a launch request or the terminate request failed,
        # so we forcefully disconnect
await self.request('disconnect', {
'restart': False
})
def stop_debug_adapter_session(self):
if self.launching_async:
self.launching_async.cancel()
self.breakpoints_for_id = {}
self.watch.clear_session_data(self)
self.breakpoints.clear_session_data()
self.stop_requested = False
if self._transport:
self.adapter_configuration.did_stop_debugging(self)
self._transport.dispose()
self._transport = None
async def stop_forced(self, reason: int) -> None:
if self.state == Session.State.STOPPING or self.state == Session.State.STOPPED:
return
self.stopped_reason = reason
self.state = Session.State.STOPPING
self.stop_debug_adapter_session()
await self.run_post_debug_task()
self._change_status('Ended')
self.state = Session.State.STOPPED
if not self.complete.done():
self.complete.set_result(None)
def dispose(self) -> None:
self.stop_debug_adapter_session()
for disposeable in self.disposeables:
disposeable.dispose()
if self.parent:
self.parent.children.remove(self)
self.parent = None
# clean up hierarchy if needed
for child in self.children:
child.parent = None
async def resume(self):
body = await self.request('continue', {
'threadId': self.command_thread.id
})
# some adapters aren't giving a response here
if body:
allThreadsContinued = body.get('allThreadsContinued', True)
else:
allThreadsContinued = True
self.on_continued_event(dap.ContinuedEvent(self.command_thread.id, allThreadsContinued))
async def pause(self):
await self.request('pause', {
'threadId': self.command_thread.id
})
async def step_over(self):
self.on_continued_event(dap.ContinuedEvent(self.command_thread.id, False))
await self.request('next', {
'threadId': self.command_thread.id
})
async def step_in(self):
self.on_continued_event(dap.ContinuedEvent(self.command_thread.id, False))
await self.request('stepIn', {
'threadId': self.command_thread.id
})
async def step_out(self):
self.on_continued_event(dap.ContinuedEvent(self.command_thread.id, False))
await self.request('stepOut', {
'threadId': self.command_thread.id
})
async def evaluate(self, expression: str, context: str = 'repl'):
result = await self.evaluate_expression(expression, context)
if not result:
raise Error('expression did not return a result')
# variablesReference doesn't appear to be optional in the spec... but some adapters treat it as such
event = dap.OutputEvent(result.result + '\n', 'console', variablesReference=result.variablesReference)
self.listener.on_session_output_event(self, event)
async def evaluate_expression(self, expression: str, context: str|None) -> dap.EvaluateResponse:
frameId: int|None = None
if self.selected_frame:
frameId = self.selected_frame.id
response = await self.request('evaluate', {
'expression': expression,
'context': context,
'frameId': frameId,
})
# the spec doesn't say this is optional? But it seems that some implementations throw errors instead of marking things as not verified?
if response['result'] is None:
raise Error('expression did not return a result')
return response
async def read_memory(self, memory_reference: str, count: int, offset: int) -> dap.ReadMemoryResponse:
v = await self.request('readMemory', {
'memoryReference': memory_reference,
'count': count,
'offset': offset
})
return v
async def stack_trace(self, thread_id: int) -> list[dap.StackFrame]:
body = await self.request('stackTrace', {
'threadId': thread_id,
})
return body['stackFrames']
async def completions(self, text: str, column: int) -> list[dap.CompletionItem]:
frameId = None
if self.selected_frame:
frameId = self.selected_frame.id
response = await self.request('completions', {
'frameId': frameId,
'text': text,
'column': column,
})
return response['targets']
async def set_variable(self, variablesReference: int, name: str, value: str) -> dap.SetVariableResponse:
response = await self.request('setVariable', {
'variablesReference': variablesReference,
'name': name,
'value': value,
})
return response
async def data_breakpoint_info(self, variablesReference: int, name: str) -> dap.DataBreakpointInfoResponse:
response = await self.request('dataBreakpointInfo', {
'variablesReference': variablesReference,
'name': name,
})
return response
def log(self, type: str, value: str) -> None:
if type == 'process':
self.transport_log.info(f'⟹ process/stderr :: {value.strip()}')
return
if type == 'error':
output = dap.OutputEvent(value + '\n', 'debugger.error')
self.listener.on_session_output_event(self, output)
return
output = dap.OutputEvent(value + '\n', 'debugger.info')
self.listener.on_session_output_event(self, output)
def load_frame(self, frame: Optional[dap.StackFrame]):
self.listener.on_session_selected_frame(self, frame)
if frame:
core.run(self.refresh_scopes(frame))
core.run(self.watch.evaluate(self, frame))
else:
self.variables.clear()
self.listener.on_session_updated_variables(self)
async def refresh_scopes(self, frame: dap.StackFrame):
body = await self.request('scopes', {
'frameId': frame.id
})
scopes: list[dap.Scope] = body['scopes']
self.variables = [Variable.from_scope(self, scope) for scope in scopes]
self.listener.on_session_updated_variables(self)
async def get_source(self, source: dap.Source) -> tuple[str, str|None]:
body = await self.request('source', {
'source': {
'path': source.path,
'sourceReference': source.sourceReference
},
'sourceReference': source.sourceReference
})
return body['content'], body.get('mimeType')
async def get_variables(self, variablesReference: int, without_names: bool = False) -> list[Variable]:
response = await self.request('variables', {
'variablesReference': variablesReference
})
variables: list[dap.Variable] = response['variables']
# vscode seems to remove the names from variables in output events
if without_names:
for v in variables:
v.name = ''
v.value = v.value.split('\n')[0]
return [Variable.from_variable(self, variablesReference, v) for v in variables]
def on_breakpoint_event(self, event: dap.BreakpointEvent):
assert event.breakpoint.id
b = self.breakpoints_for_id.get(event.breakpoint.id)
if b:
self.breakpoints.source.set_result(b, event.breakpoint)
def on_module_event(self, event: dap.ModuleEvent):
if event.reason == 'new':
self.modules[event.module.id] = event.module
if event.reason == 'removed':
try:
del self.modules[event.module.id]
except KeyError:
...
if event.reason == 'changed':
self.modules[event.module.id] = event.module
self.listener.on_session_updated_modules(self)
def on_loaded_source_event(self, event: dap.LoadedSourceEvent):
id = f'{event.source.name}~{event.source.path}~{event.source.sourceReference}'
if event.reason == 'new':
self.sources[id] = event.source
elif event.reason == 'removed':
try:
del self.sources[id]
except KeyError:
...
elif event.reason == 'changed':
self.sources[id] = event.source
self.listener.on_session_updated_sources(self)
# this is a bit of a weird case. Initialized will happen at some point in time
# it depends on when the debug adapter chooses it is ready for configuration information
# when it does happen we can then add all the breakpoints and complete the configuration
    def addTypeEqualityFunc(self, typeobj, function):
        """Add a type specific assertEqual style function to compare a type.
        This method is for use by TestCase subclasses that need to register
        their own type equality functions to provide nicer error messages.
        Args:
            typeobj: The data type to call this function on when both values
                are of the same type in assertEqual().
            function: The callable taking two arguments and an optional
                msg= argument that raises self.failureException with a
                useful error message when the two arguments are not equal.
        """
        self._type_equality_funcs[typeobj] = function
    def addCleanup(self, function, *args, **kwargs):
        """Add a function, with arguments, to be called when the test is
        completed. Functions added are called on a LIFO basis and are
        called after tearDown on test failure or success.
        Cleanup items are called even if setUp fails (unlike tearDown)."""
        self._cleanups.append((function, args, kwargs))
    def setUp(self):
        "Hook method for setting up the test fixture before exercising it."
        pass
    def tearDown(self):
        "Hook method for deconstructing the test fixture after testing it."
        pass
    @classmethod
    def setUpClass(cls):
        "Hook method for setting up class fixture before running tests in the class."
    @classmethod
    def tearDownClass(cls):
        "Hook method for deconstructing the class fixture after running all tests in the class."
    def countTestCases(self):
        return 1
    def defaultTestResult(self):
        return result.TestResult()
    def shortDescription(self):
        """Returns a one-line description of the test, or None if no
        description has been provided.
        The default implementation of this method returns the first line of
        the specified test method's docstring.
        """
        doc = self._testMethodDoc
        return doc and doc.split("\n")[0].strip() or None
    def id(self):
        return "%s.%s" % (strclass(self.__class__), self._testMethodName)
    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return self._testMethodName == other._testMethodName
    def __hash__(self):
        return hash((type(self), self._testMethodName))
    def __str__(self):
        return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
    def __repr__(self):
        return "<%s testMethod=%s>" % \
            (strclass(self.__class__), self._testMethodName)
    def _addSkip(self, result, test_case, reason):
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None:
            addSkip(test_case, reason)
        else:
            warnings.warn("TestResult has no addSkip method, skips not reported",
                          RuntimeWarning, 2)
            result.addSuccess(test_case)
    @contextlib.contextmanager
    def subTest(self, msg=None, **params):
        """Return a context manager that will return the enclosed block
        of code in a subtest identified by the optional message and
        keyword parameters. A failure in the subtest marks the test
        case as failed but resumes execution at the end of the enclosed
        block, allowing further test code to be executed.
        """
        if not self._outcome.result_supports_subtests:
            yield
            return
        parent = self._subtest
        if parent is None:
            params_map = collections.ChainMap(params)
        else:
            params_map = parent.params.new_child(params)
        self._subtest = _SubTest(self, msg, params_map)
        try:
            with self._outcome.testPartExecutor(self._subtest, isTest=True):
                yield
            if not self._outcome.success:
                result = self._outcome.result
                if result is not None and result.failfast:
                    raise _ShouldStop
            elif self._outcome.expectedFailure:
                # If the test is expecting a failure, we really want to
                # stop now and register the expected failure.
                raise _ShouldStop
        finally:
            self._subtest = parent
    def _feedErrorsToResult(self, result, errors):
        for test, exc_info in errors:
            if isinstance(test, _SubTest):
                result.addSubTest(test.test_case, test, exc_info)
            elif exc_info is not None:
                if issubclass(exc_info[0], self.failureException):
                    result.addFailure(test, exc_info)
                else:
                    result.addError(test, exc_info)
    def _addExpectedFailure(self, result, exc_info):
        try:
            addExpectedFailure = result.addExpectedFailure
        except AttributeError:
            warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
                          RuntimeWarning)
            result.addSuccess(self)
        else:
            addExpectedFailure(self, exc_info)
    def _addUnexpectedSuccess(self, result):
        try:
            addUnexpectedSuccess = result.addUnexpectedSuccess
        except AttributeError:
            warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failure",
                          RuntimeWarning)
            # We need to pass an actual exception and traceback to addFailure,
            # otherwise the legacy result can choke.
            try:
                raise _UnexpectedSuccess from None
            except _UnexpectedSuccess:
                result.addFailure(self, sys.exc_info())
        else:
            addUnexpectedSuccess(self)
    def run(self, result=None):
        orig_result = result
        if result is None:
            result = self.defaultTestResult()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()
        result.startTest(self)
        testMethod = getattr(self, self._testMethodName)
        if (getattr(self.__class__, "__unittest_skip__", False) or
                getattr(testMethod, "__unittest_skip__", False)):
            # If the class or method was skipped.
            try:
                skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
                            or getattr(testMethod, '__unittest_skip_why__', ''))
                self._addSkip(result, self, skip_why)
            finally:
                result.stopTest(self)
            return
        expecting_failure = getattr(testMethod,
                                    "__unittest_expecting_failure__", False)
        outcome = _Outcome(result)
        try:
            self._outcome = outcome
            with outcome.testPartExecutor(self):
                self.setUp()
            if outcome.success:
                outcome.expecting_failure = expecting_failure
                with outcome.testPartExecutor(self, isTest=True):
                    testMethod()
                outcome.expecting_failure = False
                with outcome.testPartExecutor(self):
                    self.tearDown()
            self.doCleanups()
            for test, reason in outcome.skipped:
                self._addSkip(result, test, reason)
            self._feedErrorsToResult(result, outcome.errors)
            if outcome.success:
                if expecting_failure:
                    if outcome.expectedFailure:
                        self._addExpectedFailure(result, outcome.expectedFailure)
                    else:
                        self._addUnexpectedSuccess(result)
                else:
                    result.addSuccess(self)
            return result
        finally:
            result.stopTest(self)
            if orig_result is None:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
            # explicitly break reference cycles:
            # outcome.errors -> frame -> outcome -> outcome.errors
            # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
            outcome.errors.clear()
            outcome.expectedFailure = None
            # clear the outcome, no more needed
            self._outcome = None
    def doCleanups(self):
        """Execute all cleanup functions. Normally called for you after
        tearDown."""
        outcome = self._outcome or _Outcome()
        while self._cleanups:
            function, args, kwargs = self._cleanups.pop()
            with outcome.testPartExecutor(self):
                function(*args, **kwargs)
        # return this for backwards compatibility
        # even though we no longer use it internally
        return outcome.success
    def __call__(self, *args, **kwds):
        return self.run(*args, **kwds)
    def debug(self):
        """Run the test without collecting errors in a TestResult"""
        self.setUp()
        getattr(self, self._testMethodName)()
        self.tearDown()
        while self._cleanups:
            function, args, kwargs = self._cleanups.pop(-1)
            function(*args, **kwargs)
    def skipTest(self, reason):
        """Skip this test."""
        raise SkipTest(reason)
    def fail(self, msg=None):
        """Fail immediately, with the given message."""
        raise self.failureException(msg)
    def assertFalse(self, expr, msg=None):
        """Check that the expression is false."""
        if expr:
            msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
            raise self.failureException(msg)
    def assertTrue(self, expr, msg=None):
        """Check that the expression is true."""
        if not expr:
            msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
            raise self.failureException(msg)
    def _formatMessage(self, msg, standardMsg):
        """Honour the longMessage attribute when generating failure messages.
        If longMessage is False this means:
        * Use only an explicit message if it is provided
        * Otherwise use the standard message for the assert
        If longMessage is True:
        * Use the standard message
        * If an explicit message is provided, plus ' : ' and the explicit message
        """
        if not self.longMessage:
            return msg or standardMsg
        if msg is None:
            return standardMsg
        try:
            # don't switch to '{}' formatting in Python 2.X
            # it changes the way unicode input is handled
            return '%s : %s' % (standardMsg, msg)
        except UnicodeDecodeError:
            return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
    def assertRaises(self, expected_exception, *args, **kwargs):
        """Fail unless an exception of class expected_exception is raised
        by the callable when invoked with specified positional and
        keyword arguments. If a different type of exception is
        raised, it will not be caught, and the test case will be
        deemed to have suffered an error, exactly as for an
        unexpected exception.
        If called with the callable and arguments omitted, will return a
        context object used like this::
            with self.assertRaises(SomeException):
                do_something()
        An optional keyword argument 'msg' can be provided when assertRaises
        is used as a context object.
        The context manager keeps a reference to the exception as
        the 'exception' attribute. This allows you to inspect the
        exception after the assertion::
            with self.assertRaises(SomeException) as cm:
                do_something()
            the_exception = cm.exception
            self.assertEqual(the_exception.error_code, 3)
        """
        context = _AssertRaisesContext(expected_exception, self)
        return context.handle('assertRaises', args, kwargs)
    def assertWarns(self, expected_warning, *args, **kwargs):
        """Fail unless a warning of class warnClass is triggered
        by the callable when invoked with specified positional and
        keyword arguments. If a different type of warning is
        triggered, it will not be handled: depending on the other
        warning filtering rules in effect, it might be silenced, printed
        out, or raised as an exception.
        If called with the callable and arguments omitted, will return a
        context object used like this::
            with self.assertWarns(SomeWarning):
                do_something()
        An optional keyword argument 'msg' can be provided when assertWarns
        is used as a context object.
        The context manager keeps a reference to the first matching
        warning as the 'warning' attribute; similarly, the 'filename'
        and 'lineno' attributes give you information about the line
        of Python code from which the warning was triggered.
        This allows you to inspect the warning after the assertion::
            with self.assertWarns(SomeWarning) as cm:
                do_something()
            the_warning = cm.warning
            self.assertEqual(the_warning.some_attribute, 147)
        """
        context = _AssertWarnsContext(expected_warning, self)
        return context.handle('assertWarns', args, kwargs)
    def assertLogs(self, logger=None, level=None):
        """Fail unless a log message of level *level* or higher is emitted
        on *logger_name* or its children. If omitted,
        marketplace = args[0]
address = args[1]
maker_fee = args[2]
taker_fee = args[3]
return register_marketplace(marketplace, address, maker_fee, taker_fee)
if operation == "set_contract_state":
contract_state = args[0]
set_contract_state(contract_state)
return True
# State of the contract.
if operation == "get_state":
return get_contract_state()
# ========= Decentralized Games ==========
# Battle Royale
if operation == "BR_create":
# To start the event, the marketplace owner, uploads rewards and requirements.
event_code = args[0]
marketplace_owner_address = args[1]
marketplace = args[2]
# Remove the first 3 keyword args, the following should be items.
for i in range(0, 3):
args.remove(0)
result = BR_create(event_code, marketplace, marketplace_owner_address, args)
payload = ["BR", event_code, "BR_create", marketplace_owner_address, result]
Notify(payload)
return result
if operation == "BR_sign_up":
if len(args) == 2:
event_code = args[0]
address = args[1]
result = BR_sign_up(event_code, address)
details = ["BR", event_code, "BR_sign_up", address, result]
Notify(details)
return result
if operation == "BR_start":
if len(args) == 2:
event_code = args[0]
address = args[1]
result = BR_start(event_code, address)
details = ["BR", event_code, "BR_start", address, result]
Notify(details)
return result
if operation == "BR_choose_initial_zone":
if len(args) == 3:
event_code = args[0]
address = args[1]
zone = args[2]
# The first action which will be resolved the next round.
return BR_choose_initial_grid_position(event_code, address, zone)
if operation == "BR_do_action":
if len(args) == 4:
event_code = args[0]
address = args[1]
action = args[2]
direction = args[3]
return BR_do_action(event_code, address, action, direction)
if operation == "BR_finish_round":
if len(args) == 1:
event_code = args[0]
return BR_finish_round(event_code)
if operation == "BR_get_leaderboard":
if len(args) == 1:
context = GetContext()
event_code = args[0]
leaderboard = get_BR_leaderboard(context, event_code)
if leaderboard != b'':
leaderboard = Deserialize(leaderboard)
else:
leaderboard = []
payload = ["BR", event_code, 'leaderboard', leaderboard]
Notify(payload)
return True
if operation == "BR_get_event_details":
if len(args) == 1:
context = GetContext()
event_code = args[0]
event_details = get_BR_event_details(context, event_code)
payload = ["BR", event_code, "event_details", event_details]
Notify(payload)
return True
return False
# Owner will not be allowed to withdraw anything.
if trigger == Verification():
pass
# check if the invoker is the owner of this contract
# is_owner = CheckWitness(contract_owner)
# If owner, proceed
# if is_owner:
# return True
return False
def exchange(marketplace, marketplace_owner_address, marketplace_owner_signature,
marketplace_owner_public_key, originator_address, originator_signature,
originator_public_key, taker_address, taker_signature, taker_public_key,
originator_order_salt, taker_order_salt, item_id, price):
"""
Verify the signatures of two parties and securely swap the item, and tokens between them.
"""
if order_complete(originator_order_salt):
print("ERROR! This transaction has already occurred!")
return False
if order_complete(taker_order_salt):
print("ERROR! This transaction has already occurred!")
return False
originator_args = ["put_offer", marketplace, item_id, price, originator_order_salt]
if not verify_order(originator_address, originator_signature, originator_public_key, originator_args):
print("ERROR! originator has not signed the order")
return False
taker_args = ["buy_offer", marketplace, item_id, price, taker_order_salt]
if not verify_order(taker_address, taker_signature, taker_public_key, taker_args):
print("ERROR! Taker has not signed the order!")
return False
# A marketplace owner must verify so there are no jumps in the queue.
marketplace_owner_args = ["exchange", marketplace, item_id, price, originator_address, taker_address]
if not verify_order(marketplace_owner_address, marketplace_owner_signature, marketplace_owner_public_key,
marketplace_owner_args):
print("ERROR! Marketplace owner has not signed the order!")
return False
if not trade(marketplace, originator_address, taker_address, item_id):
print("ERROR! Items could not be transferred.")
return False
if not transfer_token(taker_address, originator_address, price):
print("ERROR! Tokens could not be transferred.")
return False
# Set the orders as complete so they can only occur once.
set_order_complete(originator_order_salt)
set_order_complete(taker_order_salt)
return True
def trade_verified(marketplace, originator_address, taker_address, item_id,
marketplace_owner_address, marketplace_owner_signature,
marketplace_owner_public_key, originator_signature,
originator_public_key, salt):
"""
Transfer an item from an address, to an address on a marketplace.
"""
if not is_marketplace_owner(marketplace, marketplace_owner_address):
print("ERROR! Only a marketplace owner is allowed to give items.")
return False
if order_complete(salt):
print("ERROR! This order has already occurred!")
return False
args = ["trade", marketplace, originator_address, taker_address, item_id, salt]
if not verify_order(marketplace_owner_address, marketplace_owner_signature, marketplace_owner_public_key, args):
print("ERROR! The marketplace owner has not permitted the transaction.")
return False
if not verify_order(originator_address, originator_signature, originator_public_key, args):
print("ERROR! The address removing has not signed this!")
return False
if trade(marketplace, originator_address, taker_address, item_id):
set_order_complete(salt)
return True
print("ERROR! Could not complete the trade")
return False
def trade(marketplace, originator_address, taker_address, item_id):
"""
Trade an item from one address to another, on a specific marketplace.
"""
# If the item is being transferred to the same address, don't waste gas and return True.
if originator_address == taker_address:
return True
# If the removal of the item from the address sending is successful, give the item to the address receiving.
    if remove_item(marketplace, originator_address, item_id):
        if give_item(marketplace, taker_address, item_id):
            return True
    return False
def give_item_verified(marketplace, taker_address, item_id,
owner_address, owner_signature,
owner_public_key, salt):
"""
Give an item to an address on a specific marketplace, verified by a marketplace owner.
"""
if not is_marketplace_owner(marketplace, owner_address):
print("Only a marketplace owner is allowed to give items.")
return False
if order_complete(salt):
print("This order has already occurred!")
return False
args = ["give_item", marketplace, taker_address, item_id, 0, salt]
if not verify_order(owner_address, owner_signature, owner_public_key, args):
print("A marketplace owner has not signed this order.")
return False
set_order_complete(salt)
give_item(marketplace, taker_address, item_id)
return True
def give_item(marketplace, taker_address, item_id):
"""
Give an item to an address on a specific marketplace.
"""
    # Get the player's inventory from storage.
inventory_s = get_inventory(marketplace, taker_address)
# If the address owns no items create a new list, else grab the pre-existing list and append the new item.
if inventory_s == b'':
inventory = [item_id]
else:
inventory = Deserialize(inventory_s)
inventory.append(item_id)
# Serialize and save the inventory back to the storage.
inventory_s = Serialize(inventory)
save_inventory(marketplace, taker_address, inventory_s)
return True
def remove_item_verified(marketplace, address, item_id, salt, owner_address,
owner_signature, owner_public_key, signature, public_key):
"""
Remove an item from an address on a marketplace.
"""
if not is_marketplace_owner(marketplace, owner_address):
print("ERROR! Only a marketplace owner is allowed to give items.")
return False
if order_complete(salt):
print("ERROR! This order has already occurred!")
return False
args = ["remove_item", marketplace, address, item_id, 0, salt]
if not verify_order(owner_address, owner_signature, owner_public_key, args):
print("ERROR! A marketplace owner has not signed this order.")
return False
owner_args = ["remove_item", marketplace, address, item_id, 0, salt]
if not verify_order(address, signature, public_key, owner_args):
print("ERROR! The address removing has not signed this!")
return False
if remove_item(marketplace, address, item_id):
set_order_complete(salt)
return True
return False
def remove_item(marketplace, address, item_id):
"""
Remove an item from an address on a specific marketplace.
"""
inventory_s = get_inventory(marketplace, address)
if inventory_s != b'':
inventory = Deserialize(inventory_s)
current_index = 0
for item in inventory:
if item == item_id:
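# Assumption: under the NEO VM / neo-boa compiler, list.remove() takes an
# index (not a value as in standard Python), so the matched element's
# position is tracked in current_index and passed below.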
inventory.remove(current_index)
inventory_s = Serialize(inventory)
save_inventory(marketplace, address, inventory_s)
return True
current_index += 1
return False
def verify_order(address, signature, public_key, args):
"""
Verify that an order is properly signed by a signature and public key.
We also ensure the public key can be recreated into the script hash
so we know that it is the address that signed it.
"""
message = ""
for arg in args:
message = concat(message, arg)
# Create the script hash from the given public key, to verify the address.
redeem_script = b'21' + public_key + b'ac'
script_hash = hash160(redeem_script)
# Must verify the address we are doing something for is the public key whom signed the order.
if script_hash != address:
print("ERROR! The public key does not match with the address who signed the order.")
return False
if not verify_signature(public_key, signature, message):
print("ERROR! Signature has not signed the order.")
return False
return True
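# --- Hedged off-chain sketch (illustrative only, not part of the contract) ---
# A client can precompute the same order message and script hash before
# submitting, mirroring verify_order above. The helper below is an assumption
# written for off-chain Python with hashlib; the exact redeem-script bytes and
# CHECKSIG conventions must match the target chain, and the name is hypothetical.
#
#   def offchain_script_hash(public_key):
#       import hashlib
#       redeem_script = b'21' + public_key + b'ac'     # same prefix/suffix as above
#       sha = hashlib.sha256(redeem_script).digest()
#       return hashlib.new('ripemd160', sha).digest()  # hash160 = RIPEMD160(SHA256(x))
#
#   # The signed message is simply the concatenation of the order arguments,
#   # e.g. "exchange" + marketplace + item_id + price + originator + taker.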
def get_contract_state():
"""Current state of the contract."""
context = GetContext()
state = Get(context, contract_state_key)
return state
def set_contract_state(state):
""" Set the state of the contract. """
context = GetContext()
Delete(context, contract_state_key)
Put(context, contract_state_key, state)
return True
def set_order_complete(salt):
""" So an order is not repeated, user has signed a salt. """
context = GetContext()
key = concat(order_key, salt)
Put(context, key, True)
return True
def order_complete(salt):
""" Check if an order has already been completed."""
context = GetContext()
key = concat(order_key, salt)
exists = Get(context, key)
if exists != b'':
return True
return False
# return exists != b''
def increase_balance(address, amount):
"""
Called on deposit to increase the amount of LOOT in storage of an address.
"""
context = GetContext()
# LOOT balance is mapped directly to an address
key = address
current_balance = Get(context, key)
new_balance = current_balance + amount
Put(context, key, new_balance)
# Notify that address that their deposit is complete.
# test/test_3gpp_channel_lsp.py
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
try:
import sionna
except ImportError as e:
import sys
sys.path.append("../")
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
print('Number of GPUs available :', len(gpus))
if gpus:
gpu_num = 0 # Number of the GPU to be used
try:
tf.config.set_visible_devices(gpus[gpu_num], 'GPU')
print('Only GPU number', gpu_num, 'used.')
tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
except RuntimeError as e:
print(e)
import unittest
import numpy as np
import sionna
from channel_test_utils import *
from scipy.stats import kstest, norm
class TestLSP(unittest.TestCase):
r"""Test the distribution, cross-correlation, and spatial correlation of
3GPP channel models' LSPs
"""
# Carrier frequency
CARRIER_FREQUENCY = 3.5e9 # Hz
# Height of UTs
H_UT = 1.5
# Height of BSs
H_BS = 35.0
# Batch size for generating samples of LSPs and pathlosses
BATCH_SIZE = 500000
# More than one UT is required for testing the spatial and cross-correlation
# of LSPs
NB_UT = 5
# The LSPs follow either a Gaussian or a truncated Gaussian
# distribution. A Kolmogorov-Smirnov (KS) test is used to check that the
# LSPs follow the appropriate distribution. This is the threshold below
# which the KS statistic `D` should be for passing the test.
MAX_ERR_KS = 1e-2
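# Acceptance criterion used by the distribution tests below (sketch):
#   D, _ = kstest(samples, norm.cdf, args=(mu, std))
#   self.assertLessEqual(D, TestLSP.MAX_ERR_KS)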
# Maximum allowed deviation for cross-correlation of LSP parameters
MAX_ERR_CROSS_CORR = 3e-2
# Maximum allowed deviation for spatial correlation of LSP parameters
MAX_ERR_SPAT_CORR = 3e-2
# LoS probability
MAX_ERR_LOS_PROB = 1e-2
# ZOD Offset maximum relative error
MAX_ERR_ZOD_OFFSET = 1e-2
# Maximum allowed deviation for pathloss
MAX_ERR_PATHLOSS_MEAN = 1.0
MAX_ERR_PATHLOSS_STD = 1e-1
def limited_normal(self, batch_size, minval, maxval, mu, std):
r"""
Return samples from a limited normal distribution. This is different from a
truncated normal distribution in that samples exceeding ``minval`` or ``maxval``
are clipped rather than re-drawn.
More precisely, ``x`` is generated as follows:
1. Sample ``y`` of shape [``batch_size``] from the standard normal distribution N(0,1)
2. Clip: y = max(min(y, maxval), minval)
3. Scale and shift: x = std*y + mu
"""
x = np.random.normal(size=[batch_size])
x = np.maximum(x, minval)
x = np.minimum(x, maxval)
x = std*x+mu
return x
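# Example (illustrative values only): limited_normal(4, -np.inf, 1.0, 0.0, 2.0)
# draws 4 standard-normal samples, clips them from above at 1.0, then scales by
# std=2.0 and shifts by mu=0.0 -- this is how the reference samples for the
# KS tests below are generated.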
def setUpClass():
r"""Sample LSPs and pathlosses from all channel models for testing"""
# Forcing the seed to make the tests deterministic
tf.random.set_seed(42)
np.random.seed(42)
nb_bs = 1
fc = TestLSP.CARRIER_FREQUENCY
h_ut = TestLSP.H_UT
h_bs = TestLSP.H_BS
batch_size = TestLSP.BATCH_SIZE
nb_ut = TestLSP.NB_UT
# UT and BS arrays have no impact on LSP
# However, these are needed to instantiate the model
bs_array = sionna.channel.tr38901.PanelArray(num_rows_per_panel=2,
num_cols_per_panel=2,
polarization='dual',
polarization_type='VH',
antenna_pattern='38.901',
carrier_frequency=fc,
dtype=tf.complex128)
ut_array = sionna.channel.tr38901.PanelArray(num_rows_per_panel=1,
num_cols_per_panel=1,
polarization='dual',
polarization_type='VH',
antenna_pattern='38.901',
carrier_frequency=fc,
dtype=tf.complex128)
# The following quantities have no impact on LSP
# However, these are needed to instantiate the model
ut_orientations = tf.zeros([batch_size, nb_ut], dtype=tf.float64)
bs_orientations = tf.zeros([batch_size, nb_ut], dtype=tf.float64)
ut_velocities = tf.zeros([batch_size, nb_ut], dtype=tf.float64)
# LSPs, ZoD offset, pathlosses
TestLSP.lsp_samples = {}
TestLSP.zod_offset = {}
TestLSP.pathlosses = {}
TestLSP.los_prob = {}
ut_loc = generate_random_loc(batch_size, nb_ut, (100,2000),
(100,2000), (h_ut, h_ut),
share_loc=True, dtype=tf.float64)
bs_loc = generate_random_loc(batch_size, nb_bs, (0,100),
(0,100), (h_bs, h_bs),
share_loc=True, dtype=tf.float64)
####### RMa
TestLSP.lsp_samples['rma'] = {}
TestLSP.zod_offset['rma'] = {}
TestLSP.pathlosses['rma'] = {}
scenario = sionna.channel.tr38901.RMaScenario( fc,
ut_array,
bs_array,
"uplink",
dtype=tf.complex128)
lsp_sampler = sionna.channel.tr38901.LSPGenerator(scenario)
# LoS
in_state = generate_random_bool(batch_size, nb_ut, 0.0)
scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,
ut_velocities, in_state, True)
lsp_sampler.topology_updated_callback()
TestLSP.lsp_samples['rma']['los'] = lsp_sampler()
TestLSP.zod_offset['rma']['los'] = scenario.zod_offset
TestLSP.pathlosses['rma']['los'] = lsp_sampler.sample_pathloss()[:,0,:]
# NLoS
in_state = generate_random_bool(batch_size, nb_ut, 0.0)
scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,
ut_velocities, in_state, False)
lsp_sampler.topology_updated_callback()
TestLSP.lsp_samples['rma']['nlos'] = lsp_sampler()
TestLSP.zod_offset['rma']['nlos'] = scenario.zod_offset
TestLSP.pathlosses['rma']['nlos'] = lsp_sampler.sample_pathloss()[:,0,:]
# Indoor
in_state = generate_random_bool(batch_size, nb_ut, 1.0)
scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,
ut_velocities, in_state)
lsp_sampler.topology_updated_callback()
TestLSP.lsp_samples['rma']['o2i'] = lsp_sampler()
TestLSP.zod_offset['rma']['o2i'] = scenario.zod_offset
TestLSP.pathlosses['rma']['o2i'] = lsp_sampler.sample_pathloss()[:,0,:]
TestLSP.los_prob['rma'] = scenario.los_probability.numpy()
TestLSP.rma_w = scenario.average_street_width
TestLSP.rma_h = scenario.average_building_height
####### UMi
TestLSP.lsp_samples['umi'] = {}
TestLSP.zod_offset['umi'] = {}
TestLSP.pathlosses['umi'] = {}
scenario = sionna.channel.tr38901.UMiScenario( fc,
'low',
ut_array,
bs_array,
"uplink",
dtype=tf.complex128)
lsp_sampler = sionna.channel.tr38901.LSPGenerator(scenario)
# LoS
in_state = generate_random_bool(batch_size, nb_ut, 0.0)
scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,
ut_velocities, in_state, True)
lsp_sampler.topology_updated_callback()
TestLSP.lsp_samples['umi']['los'] = lsp_sampler()
TestLSP.zod_offset['umi']['los'] = scenario.zod_offset
TestLSP.pathlosses['umi']['los'] = lsp_sampler.sample_pathloss()[:,0,:]
# NLoS
in_state = generate_random_bool(batch_size, nb_ut, 0.0)
scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,
ut_velocities, in_state, False)
lsp_sampler.topology_updated_callback()
TestLSP.lsp_samples['umi']['nlos'] = lsp_sampler()
TestLSP.zod_offset['umi']['nlos'] = scenario.zod_offset
TestLSP.pathlosses['umi']['nlos'] = lsp_sampler.sample_pathloss()[:,0,:]
# Indoor
in_state = generate_random_bool(batch_size, nb_ut, 1.0)
scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,
ut_velocities, in_state)
lsp_sampler.topology_updated_callback()
TestLSP.lsp_samples['umi']['o2i'] = lsp_sampler()
TestLSP.zod_offset['umi']['o2i'] = scenario.zod_offset
TestLSP.pathlosses['umi']['o2i-low'] = lsp_sampler.sample_pathloss()[:,0,:]
TestLSP.los_prob['umi'] = scenario.los_probability.numpy()
####### UMa
TestLSP.lsp_samples['uma'] = {}
TestLSP.zod_offset['uma'] = {}
TestLSP.pathlosses['uma'] = {}
scenario = sionna.channel.tr38901.UMaScenario( fc,
'low',
ut_array,
bs_array,
"uplink",
dtype=tf.complex128)
lsp_sampler = sionna.channel.tr38901.LSPGenerator(scenario)
# LoS
in_state = generate_random_bool(batch_size, nb_ut, 0.0)
scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,
ut_velocities, in_state, True)
lsp_sampler.topology_updated_callback()
TestLSP.lsp_samples['uma']['los'] = lsp_sampler()
TestLSP.zod_offset['uma']['los'] = scenario.zod_offset
TestLSP.pathlosses['uma']['los'] = lsp_sampler.sample_pathloss()[:,0,:]
# NLoS
in_state = generate_random_bool(batch_size, nb_ut, 0.0)
scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,
ut_velocities, in_state, False)
lsp_sampler.topology_updated_callback()
TestLSP.lsp_samples['uma']['nlos'] = lsp_sampler()
TestLSP.zod_offset['uma']['nlos'] = scenario.zod_offset
TestLSP.pathlosses['uma']['nlos'] = lsp_sampler.sample_pathloss()[:,0,:]
# Indoor
in_state = generate_random_bool(batch_size, nb_ut, 1.0)
scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,
ut_velocities, in_state)
lsp_sampler.topology_updated_callback()
TestLSP.lsp_samples['uma']['o2i'] = lsp_sampler()
TestLSP.zod_offset['uma']['o2i'] = scenario.zod_offset
TestLSP.pathlosses['uma']['o2i-low'] = lsp_sampler.sample_pathloss()[:,0,:]
TestLSP.los_prob['uma'] = scenario.los_probability.numpy()
# Sample pathlosses with high O2I loss model. Only with UMi and UMa
####### UMi-High
scenario = sionna.channel.tr38901.UMiScenario( fc,
'high',
ut_array,
bs_array,
"uplink",
dtype=tf.complex128)
lsp_sampler = sionna.channel.tr38901.LSPGenerator(scenario)
in_state = generate_random_bool(batch_size, nb_ut, 1.0)
scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,
ut_velocities, in_state)
lsp_sampler.topology_updated_callback()
TestLSP.pathlosses['umi']['o2i-high'] = lsp_sampler.sample_pathloss()[:,0,:]
####### UMa-high
scenario = sionna.channel.tr38901.UMaScenario( fc,
'high',
ut_array,
bs_array,
"uplink",
dtype=tf.complex128)
lsp_sampler = sionna.channel.tr38901.LSPGenerator(scenario)
in_state = generate_random_bool(batch_size, nb_ut, 1.0)
scenario.set_topology(ut_loc, bs_loc, ut_orientations, bs_orientations,
ut_velocities, in_state)
lsp_sampler.topology_updated_callback()
TestLSP.pathlosses['uma']['o2i-high'] = lsp_sampler.sample_pathloss()[:,0,:]
# The following values do not depend on the scenario
TestLSP.d_2d = scenario.distance_2d.numpy()
TestLSP.d_2d_ut = scenario.matrix_ut_distance_2d.numpy()
TestLSP.d_2d_out = scenario.distance_2d_out.numpy()
TestLSP.d_3d = scenario.distance_3d[0,0,:].numpy()
@channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))
def test_ds_dist(self, model, submodel):
"""Test the distribution of LSP DS"""
samples = TestLSP.lsp_samples[model][submodel].ds[:,0,0].numpy()
samples = np.log10(samples)
mu, std = log10DS(model, submodel, TestLSP.CARRIER_FREQUENCY)
D,_ = kstest(samples, norm.cdf, args=(mu, std))
self.assertLessEqual(D, TestLSP.MAX_ERR_KS, f"{model}:{submodel}")
@channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))
def test_asa_dist(self, model, submodel):
"""Test the distribution of LSP ASA"""
samples = TestLSP.lsp_samples[model][submodel].asa[:,0,0].numpy()
samples = np.log10(samples)
mu, std = log10ASA(model, submodel, TestLSP.CARRIER_FREQUENCY)
a = -np.inf
b = (np.log10(104)-mu)/std
samples_ref = self.limited_normal(TestLSP.BATCH_SIZE, a, b, mu, std)
# The KS-test does not work well with discontinuities.
# Therefore, we test only the continuous part of the CDF, and also test
# that the maximum value allowed is not exceeded
maxval = np.max(samples)
samples = samples[samples < np.log10(104)]
samples_ref = samples_ref[samples_ref < np.log10(104)]
D,_ = kstest(samples, samples_ref)
self.assertLessEqual(maxval, np.log10(104), f"{model}:{submodel}")
self.assertLessEqual(D, TestLSP.MAX_ERR_KS, f"{model}:{submodel}")
@channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))
def test_asd_dist(self, model, submodel):
"""Test the distribution of LSP ASD"""
samples = TestLSP.lsp_samples[model][submodel].asd[:,0,0].numpy()
samples = np.log10(samples)
mu, std = log10ASD(model, submodel, TestLSP.CARRIER_FREQUENCY)
a = -np.inf
b = (np.log10(104)-mu)/std
samples_ref = self.limited_normal(TestLSP.BATCH_SIZE, a, b, mu, std)
# The KS-test does not work well with discontinuities.
# Therefore, we test only the continuous part of the CDF, and also test
# that the maximum value allowed is not exceeded
maxval = np.max(samples)
samples = samples[samples < np.log10(104)]
samples_ref = samples_ref[samples_ref < np.log10(104)]
D,_ = kstest(samples, samples_ref)
self.assertLessEqual(maxval, np.log10(104), f"{model}:{submodel}")
self.assertLessEqual(D, TestLSP.MAX_ERR_KS, f"{model}:{submodel}")
@channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))
def test_zsa_dist(self, model, submodel):
"""Test the distribution of LSP ZSA"""
samples = TestLSP.lsp_samples[model][submodel].zsa[:,0,0].numpy()
samples = np.log10(samples)
mu, std = log10ZSA(model, submodel, TestLSP.CARRIER_FREQUENCY)
a = -np.inf
b = (np.log10(52)-mu)/std
samples_ref = self.limited_normal(TestLSP.BATCH_SIZE, a, b, mu, std)
# The KS-test does not work well with discontinuities.
# Therefore, we test only the continuous part of the CDF, and also test
# that the maximum value allowed is not exceeded
maxval = np.max(samples)
samples = samples[samples < np.log10(52)]
samples_ref = samples_ref[samples_ref < np.log10(52)]
D,_ = kstest(samples, samples_ref)
self.assertLessEqual(maxval, np.log10(52), f"{model}:{submodel}")
self.assertLessEqual(D, TestLSP.MAX_ERR_KS, f"{model}:{submodel}")
@channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))
def test_zsd_dist(self, model, submodel):
"""Test the distribution of LSP ZSD"""
d_2d = TestLSP.d_2d[0,0,0]
samples = TestLSP.lsp_samples[model][submodel].zsd[:,0,0].numpy()
samples = np.log10(samples)
mu, std = log10ZSD(model, submodel, d_2d, TestLSP.CARRIER_FREQUENCY,
TestLSP.H_BS, TestLSP.H_UT)
a = -np.inf
b = (np.log10(52)-mu)/std
samples_ref = self.limited_normal(TestLSP.BATCH_SIZE, a, b, mu, std)
# The KS-test does not work well with discontinuities.
# Therefore, we test only the continuous part of the CDF, and also test
# that the maximum value allowed is not exceeded
maxval = np.max(samples)
samples = samples[samples < np.log10(52)]
samples_ref = samples_ref[samples_ref < np.log10(52)]
D,_ = kstest(samples, samples_ref)
self.assertLessEqual(maxval, np.log10(52), f"{model}:{submodel}")
self.assertLessEqual(D, TestLSP.MAX_ERR_KS, f"{model}:{submodel}")
@channel_test_on_models(('rma', 'umi', 'uma'), ('los', 'nlos', 'o2i'))
def test_sf_dist(self,
# script to make graph of connected components in a volume
import argparse
import logging
import pickle
from collections import defaultdict
from typing import Dict, Set, Tuple, Union
import cupy as cp
import cupyx.scipy.ndimage as cpnd
import h5py
import numpy as np
VolumeFile = Tuple[str, str]
ArrayTypes = Union[np.ndarray, cp.ndarray, h5py.Dataset, VolumeFile]
TILE_SIZE = np.array([500, 500, 500])
EROSION_STEPS = 10
FOREGROUND_THRESHOLD = 0.5
def read_h5(volume_file: str, data_label: str):
h5file = h5py.File(volume_file)
return h5file[data_label]
def get_unique(in_array: cp.ndarray):
"""
Find unique columns in an array. Assume the array has shape (x, y); the goal is to keep one
copy of every distinct column (i.e. reduce the y dimension).
:param cp.ndarray in_array: Input array of shape (x, y), which will be reduced over the y dimension
:returns: Array of the unique columns of in_array, shape (x, unique_y)
"""
# Order the columns lexicographically so identical columns become adjacent,
# then keep the first column of each run of equal columns. Sorting each row
# independently would break the pairing between rows.
order = cp.lexsort(in_array)
sorted_cols = in_array[:, order]
new_values = (sorted_cols[:, 1:] != sorted_cols[:, :-1]).any(axis=0) # shape (y-1,)
# the first column is always a new value
new_values_full = cp.concatenate([cp.array([True]), new_values])
chosen_newvalues = sorted_cols[:, new_values_full]
return chosen_newvalues
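# Illustrative check (toy input, not executed anywhere):
#   pairs = cp.array([[1, 1, 2, 2],
#                     [3, 3, 4, 5]])
#   get_unique(pairs)  # -> [[1, 2, 2], [3, 4, 5]], one column per distinct pair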
def process_tile(
tile_idx: Tuple[int, int, int],
h5array: h5py.Dataset,
assoc_map: defaultdict,
tile_edges: Dict,
tile_components: Dict,
):
"""
Find content for given tile. extend boundary past reference region in order to
allow accurate erosion result within the reference region. assume that erosion
uses a filter of size 3x3, which requires 1 pixel of surrounding region for each
erosion step
"""
tile_idx_arr = np.array(tile_idx)
reference_start = tile_idx_arr * TILE_SIZE
reference_max = reference_start + TILE_SIZE
extended_start = reference_start - EROSION_STEPS
extended_max = reference_max + EROSION_STEPS
# get extended region from data. to handle cases at the edges where the extended
# region is not populated by data, create a zero-filled array and then copy the
# defined region
extended_size = TILE_SIZE + 2 * EROSION_STEPS
extended_region = cp.zeros(extended_size, dtype=bool)
valid_start = np.maximum(extended_start, 0)
source_size = np.array(h5array.shape[1:])
valid_end = np.minimum(extended_max, source_size)
valid_data_raw = cp.array(
h5array[
0,
valid_start[0] : valid_end[0],
valid_start[1] : valid_end[1],
valid_start[2] : valid_end[2],
]
)
valid_data_bool = cp.greater_equal(valid_data_raw, FOREGROUND_THRESHOLD)
insert_start = np.maximum(-extended_start, 0)
insert_end = extended_size - np.maximum(extended_max - source_size, 0)
extended_region[
insert_start[0] : insert_end[0],
insert_start[1] : insert_end[1],
insert_start[2] : insert_end[2],
] = valid_data_bool
# produce eroded results
current_region = extended_region
erosion_results = [current_region]
for _ in range(EROSION_STEPS):
eroded_region = cpnd.binary_erosion(current_region)
erosion_results.append(eroded_region)
current_region = eroded_region
# find connected components for each erosion level
label_results = [
cpnd.label(erosion_result) for erosion_result in erosion_results
]
# find size and bounds of each component, and relationships between connected components in each level
tile_component_details = []
prev_label_array = None
for label_array, label_count in label_results:
level_component_details = {}
for label_num in range(1, label_count + 1):
value_mask = label_array == label_num
# find bounds
xvals, yvals, zvals = cp.where(value_mask)
bounds = cp.stack(
[
cp.min(xvals),
cp.max(xvals) + 1,
cp.min(yvals),
cp.max(yvals) + 1,
cp.min(zvals),
cp.max(zvals) + 1,
]
).get()
center = cp.array(
[
cp.mean(xvals),
cp.mean(yvals),
cp.mean(zvals),
]
).get()
size = int(cp.sum(value_mask))
# find parent as the component label in the previous erosion level. there should
# always be a unique parent component that covers all defined pixels for this component
# choose an arbitrary position within this region
if prev_label_array is None:
parent_component_num = None
else:
parent_component_num = prev_label_array[
xvals[0], yvals[0], zvals[0]
]
level_component_details[label_num] = (
bounds,
center,
size,
parent_component_num,
)
prev_label_array = label_array
tile_component_details.append(level_component_details)
tile_components[tile_idx] = tile_component_details
# find connections between tiles by comparing with preceding neighbours
for assoc in ["x", "y", "z"]:
if assoc == "x":
if tile_idx[0] == 0:
continue
prev_tile = tile_idx_arr - [1, 0, 0]
elif assoc == "y":
if tile_idx[1] == 0:
continue
prev_tile = tile_idx_arr - [0, 1, 0]
elif assoc == "z":
if tile_idx[2] == 0:
continue
prev_tile = tile_idx_arr - [0, 0, 1]
# get surfaces for matching previous tile, and remove from dict as it will no longer
# be needed
tile_pair = (prev_tile, tile_idx)
prev_surfaces = tile_edges.pop(tile_pair)
# level_associations = []
for level_num, ((label_array, label_num), prev_surface) in enumerate(
zip(label_results, prev_surfaces)
):
if assoc == "x":
this_surface = label_array[0, :, :]
elif assoc == "y":
this_surface = label_array[:, 0, :]
elif assoc == "z":
this_surface = label_array[:, :, 0]
joined_surfaces = cp.stack(
[prev_surface, this_surface]
) # shape (2, y, z)
joined_surfaces_flat = cp.reshape(joined_surfaces, (2, -1))
unique_pairs = get_unique(joined_surfaces_flat)
zero_mask = (unique_pairs == 0).any(axis=0)
nonzero_pairs = unique_pairs[
:, ~zero_mask
].T.get() # shape (unique_nonzero_vals, 2)
# find association pairs and record in bi-directional association map
for assoc_pair in nonzero_pairs:
# if (assoc_pair == 0).any():
# continue
prev_key = (prev_tile, level_num, int(assoc_pair[0]))
this_key = (tile_idx, level_num, int(assoc_pair[1]))
assoc_map[this_key].add(prev_key)
assoc_map[prev_key].add(this_key)
# level_associations.append(unique_pairs)
# # record associations
# component_associations[tile_pair] = level_associations
# record surfaces for following neighbours
neighbour_surfaces_x, neighbour_surfaces_y, neighbour_surfaces_z = (
[],
[],
[],
)
for label_array, label_num in label_results:
neighbour_surfaces_x.append(label_array[-1, :, :])
neighbour_surfaces_y.append(label_array[:, -1, :])
neighbour_surfaces_z.append(label_array[:, :, -1])
tile_edges[
(tile_idx, tuple(tile_idx_arr + [1, 0, 0]))
] = neighbour_surfaces_x
tile_edges[
(tile_idx, tuple(tile_idx_arr + [0, 1, 0]))
] = neighbour_surfaces_y
tile_edges[
(tile_idx, tuple(tile_idx_arr + [0, 0, 1]))
] = neighbour_surfaces_z
def find_volume_components(
volume_file: str,
outfile: str,
data_label: str,
):
"""
Find connected components at various erosion levels in the given volume
"""
# open file as HDF5
logging.info(
"Opening volume file %s with data label %s" % (volume_file, data_label)
)
h5array = read_h5(volume_file, data_label)
# initialise tile association maps
# component_associations maps from a tuple (prev_tile_idx, next_tile_idx) to a list over
# erosion levels, each an array of shape (2, connection_pairs) representing components that
# are connected between tiles.
# assoc_map maps from a tuple (tile_idx, level, id) to a set of connected tiles
# (other_tile_idx, level, other_id), as a bi-directional map of connections
# tile_edges is a map from a tuple (prev_tile_idx, next_tile_idx) to a list over
# erosion levels, each an array of shape (tile_size, tile_size) representing the surface of
# tile prev_tile_idx that adjoins tile next_tile_idx
# tile_components is a map from tile_idx to a list over erosion levels, each a dict mapping
# from each label number to a tuple of (bounds, center, size, parent_num). bounds and center
# are defined within the tile, size is the number of covered voxels within the tile, and
# parent_num is the component number in the previous erosion level within the tile (or None if
# erosion level is zero).
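# Illustrative entries (hypothetical values) for the structures described above:
#   assoc_map[((1, 0, 0), 2, 5)] == {((0, 0, 0), 2, 3)}
#       -> component 5 of tile (1,0,0) touches component 3 of tile (0,0,0)
#          at erosion level 2
#   tile_components[(0, 0, 0)][2][3] == (bounds, center, size, parent_num)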
# component_associations = {}
assoc_map = defaultdict(set)
tile_edges = {}
tile_components = {}
# step over individual tiles and collect properties
dims = np.array(h5array.shape[1:])
tile_steps = np.ceil(dims / TILE_SIZE).astype("int")
for tile_x in range(tile_steps[0]):
for tile_y in range(tile_steps[1]):
for tile_z in range(tile_steps[2]):
tile_idx = (tile_x, tile_y, tile_z)
# process tile
process_tile(
tile_idx,
h5array,
assoc_map,
tile_edges,
tile_components,
)
# combine results
find_combined_components(tile_components, assoc_map, tile_steps, outfile)
def find_combined_components(
tile_components: Dict,
assoc_map: defaultdict,
tile_steps: np.ndarray,
outfile: str,
):
"""
Given a dictionary representing components within individual tiles, and associations between
components in different tiles, find global components by combining associated components from
different tiles and defining based on merged properties (eg size, center) in global coordinates.
Save results in output directory
:param Dict tile_components: Map from tile_idx to a list over erosion levels, each a dict mapping
from each label number to a tuple of (bounds, center, size, parent_num). bounds and center
are defined within the tile, size is the number of covered voxels within the tile, and
parent_num is the component number in the previous erosion level within the tile (or None if
erosion level is zero).
:param defaultdict assoc_map: Bi-directional map from a tuple (tile_idx, erosion_level, local_id) to the
set of associated components (other_tile_idx, erosion_level, other_local_id) in neighbouring tiles.
:param np.ndarray tile_steps: Number of tiles, shape (x, y, z)
:param str outfile: Output file to write global component results (as pickle)
"""
# global_components is a list over erosion levels, each a dict mapping from global component id
# to a tuple of (bounds, center, size, global_parent_num)
# Use a comprehension so that each erosion level gets its own dict; a multiplied
# list would make every level share the same dict object.
global_components = [{} for _ in range(EROSION_STEPS + 1)]
# global_id_map is a map from a tuple of (tile_idx, erosion_level, local_id) to global_id
global_id_map = {}
# first make
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python implementation of Poincaré Embeddings [1]_, an embedding that is better at capturing latent hierarchical
information than traditional Euclidean embeddings. The method is described in more detail in [1].
The main use-case is to automatically learn hierarchical representations of nodes from a tree-like structure,
such as a Directed Acyclic Graph, using a transitive closure of the relations. Representations of nodes in a
symmetric graph can also be learned, using an iterable of the direct relations in the graph.
This module allows training a Poincaré Embedding from a training file containing relations of graph in a
csv-like format, or a Python iterable of relations.
.. [1] <NAME>, <NAME> - "Poincaré Embeddings for Learning Hierarchical Representations"
https://arxiv.org/abs/1705.08039
Note: This implementation is inspired and extends the open-source Gensim implementation of Poincare Embeddings.
"""
from .dag_emb_model import *
try:
# skip autograd
raise ImportError()
from autograd import grad # Only required for optionally verifying gradients while training
from autograd import numpy as grad_np
AUTOGRAD_PRESENT = True
except ImportError:
AUTOGRAD_PRESENT = False
class PoincareModel(DAGEmbeddingModel):
"""Class for training, using and evaluating Poincare Embeddings.
The model can be stored/loaded via its :meth:`~hyperbolic.poincare_model.PoincareModel.save`
and :meth:`~hyperbolic.poincare_model.PoincareModel.load` methods, or stored/loaded in the word2vec format
via `model.kv.save_word2vec_format` and :meth:`~hyperbolic.poincare_model.PoincareKeyedVectors.load_word2vec_format`.
Note that training cannot be resumed from a model loaded via `load_word2vec_format`, if you wish to train further,
use :meth:`~hyperbolic.poincare_model.PoincareModel.save` and :meth:`~hyperbolic.poincare_model.PoincareModel.load`
methods instead.
"""
def __init__(self,
train_data,
dim=50,
init_range=(-0.0001, 0.0001),
lr=0.1,
opt='rsgd', # rsgd or exp_map
burn_in=10,
epsilon=1e-5,
seed=0,
logger=None,
num_negative=10,
### How to sample negatives for an edge (u,v)
neg_sampl_strategy='true_neg', # 'all' (all nodes for negative sampling) or 'true_neg' (only nodes not connected)
where_not_to_sample='ancestors', # both or ancestors or children. Has no effect if neg_sampl_strategy = 'all'.
neg_edges_attach='child', # How to form negative edges: 'parent' (u,v') or 'child' (u', v) or 'both'
always_v_in_neg=True, # always include the true edge (u,v) as negative.
neg_sampling_power=0.0, # 0 for uniform, 1 for unigram, 0.75 for word2vec
loss_type='nll', # 'nll', 'neg', 'maxmargin'
maxmargin_margin=1.0,
neg_r=2.0,
neg_t=1.0,
neg_mu=1.0, # Balancing factor between the positive and negative terms
):
"""Initialize and train a Poincare embedding model from an iterable of relations.
Parameters
----------
See DAGEmbeddingModel for other parameters.
epsilon : float, optional
Constant used for clipping embeddings below a norm of one.
Examples
--------
Initialize a model from a list:
>>> from poincare_model import PoincareModel
>>> relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal'), ('gib', 'cat')]
>>> model = PoincareModel(relations, num_negative=2)
Initialize a model from a file containing one relation per line:
>>> from poincare_model import PoincareModel
>>> from relations import Relations
>>> from gensim.test.utils import datapath
>>> file_path = datapath('poincare_hypernyms.tsv')
>>> model = PoincareModel(Relations(file_path, set()), num_negative=2)
See :class:`~hyperbolic.relations.Relations` for more options.
"""
print("###### PoincareModel has been initialized !!!!")
print(dict(
train_data = train_data,
dim = dim,
init_range = init_range,
lr = lr,
opt = opt,
burn_in = burn_in,
epsilon = epsilon,
seed = seed,
logger = logger,
num_negative = num_negative,
neg_sampl_strategy = neg_sampl_strategy,
where_not_to_sample = where_not_to_sample,
neg_edges_attach = neg_edges_attach,
always_v_in_neg = always_v_in_neg,
neg_sampling_power = neg_sampling_power,
loss_type = loss_type,
maxmargin_margin = maxmargin_margin,
neg_r = neg_r,
neg_t = neg_t,
neg_mu = neg_mu,
))
super().__init__(train_data=train_data,
dim=dim,
logger=logger,
init_range=init_range,
lr=lr,
opt=opt,
burn_in=burn_in,
seed=seed,
BatchClass=PoincareBatch,
KeyedVectorsClass=PoincareKeyedVectors,
num_negative=num_negative,
neg_sampl_strategy=neg_sampl_strategy,
where_not_to_sample=where_not_to_sample,
always_v_in_neg=always_v_in_neg,
neg_sampling_power=neg_sampling_power,
neg_edges_attach=neg_edges_attach)
self.epsilon = epsilon
assert self.opt in ['rsgd', 'exp_map']
self.loss_type = loss_type
assert self.loss_type in ['nll', 'neg', 'maxmargin']
self.maxmargin_margin = maxmargin_margin
self.neg_r = neg_r
self.neg_t = neg_t
self.neg_mu = neg_mu
def _clip_vectors(self, vectors):
"""Clip vectors to have a norm of less than one.
Parameters
----------
vectors : numpy.array
Can be 1-D, or 2-D (in which case the norm for each row is checked).
Returns
-------
numpy.array
Array with norms clipped below 1.
"""
# u__v_prime: MAP: 0.379;
# rank: 32.23
# u_prime__v: MAP: 0.896;
# rank: 1.80
# Our clipping
thresh = 1.0 - self.epsilon
one_d = len(vectors.shape) == 1
if one_d:
norm = np.linalg.norm(vectors)
if norm < thresh:
return vectors
else:
return thresh * vectors / norm
else:
norms = np.linalg.norm(vectors, axis=1)
if (norms < thresh).all():
return vectors
else:
vectors[norms >= thresh] *= (thresh / norms[norms >= thresh])[:, np.newaxis]
return vectors
# Old methods
# u__v_prime: rank: 32.23;
# MAP: 0.379
# u_prime__v: rank: 1.80;
# MAP: 0.896
# Our clipping
# thresh = 1.0 - self.epsilon
# one_d = len(vectors.shape) == 1
# if one_d:
# norm = np.linalg.norm(vectors)
# if norm < thresh:
# return vectors
# else:
# return vectors / (norm + self.epsilon)
# else:
# norms = np.linalg.norm(vectors, axis=1)
# if (norms < thresh).all():
# return vectors
# else:
# vectors[norms >= thresh] *= (1.0 / (norms[norms >= thresh] + self.epsilon))[:, np.newaxis]
# return vectors
# u__v_prime: MAP: 0.418;
# rank: 31.96
# u_prime__v: MAP: 0.872;
# rank: 2.06
## Original clipping
# one_d = len(vectors.shape) == 1
# threshold = 1 - self.epsilon
# if one_d:
# norm = np.linalg.norm(vectors)
# if norm < threshold:
# return vectors
# else:
# return vectors / norm - (np.sign(vectors) * self.epsilon)
# else:
# norms = np.linalg.norm(vectors, axis=1)
# if (norms < threshold).all():
# return vectors
# else:
# vectors[norms >= threshold] *= (threshold / norms[norms >= threshold])[:, np.newaxis]
# vectors[norms >= threshold] -= np.sign(vectors[norms >= threshold]) * self.epsilon
# return vectors
### For autograd
def _loss_fn(self, matrix, rels_reversed):
"""Given a numpy array with vectors for u, v and negative samples, computes loss value.
Parameters
----------
matrix : numpy.array
Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
Returns
-------
float
Computed loss value.
Warnings
--------
Only used for autograd gradients, since autograd requires a specific function signature.
"""
vector_u = matrix[0]
vectors_v = matrix[1:]
euclidean_dists = grad_np.linalg.norm(vector_u - vectors_v, axis=1)
norm = grad_np.linalg.norm(vector_u)
all_norms = grad_np.linalg.norm(vectors_v, axis=1)
poincare_dists = grad_np.arccosh(
1 + 2 * (
(euclidean_dists ** 2) / ((1 - norm ** 2) * (1 - all_norms ** 2))
)
)
if self.loss_type == 'nll':
return PoincareModel._nll_loss_fn(poincare_dists)
elif self.loss_type == 'neg':
return PoincareModel._neg_loss_fn(poincare_dists, self.neg_r, self.neg_t, self.neg_mu)
elif self.loss_type == 'maxmargin':
return PoincareModel._maxmargin_loss_fn(poincare_dists, self.maxmargin_margin)
else:
raise ValueError('Unknown loss type : ' + self.loss_type)
@staticmethod
def _nll_loss_fn(poincare_dists):
"""
Parameters
----------
poincare_dists : numpy.array
All distances d(u,v) and d(u,v'), where v' is negative. Shape (1 + negative_size).
Returns
----------
log-likelihood loss function from the NIPS paper, Eq (6).
"""
exp_negative_distances = grad_np.exp(-poincare_dists)
# Remove the value for the true edge (u,v) from the partition function
# return -grad_np.log(exp_negative_distances[0] / (- exp_negative_distances[0] + exp_negative_distances.sum()))
return poincare_dists[0] + grad_np.log(exp_negative_distances[1:].sum())
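# i.e. loss(u, v) = d(u, v) + log( sum_{v' in Neg(u)} exp(-d(u, v')) ),
# where the true edge (u, v) is excluded from the partition sum
# (the commented line above computes the same thing by subtracting its term).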
@staticmethod
def _neg_loss_fn(poincare_dists, neg_r, neg_t, neg_mu):
# NEG loss function:
# loss = - log sigma((r - d(u,v)) / t) - \sum_{v' \in N(u)} log sigma((d(u,v') - r) / t)
positive_term = grad_np.log(1.0 + grad_np.exp((- neg_r + poincare_dists[0]) / neg_t))
negative_terms = grad_np.log(1.0 + grad_np.exp((neg_r - poincare_dists[1:]) / neg_t))
return positive_term + neg_mu * negative_terms.sum()
@staticmethod
def _maxmargin_loss_fn(poincare_dists, maxmargin_margin):
"""
Parameters
----------
poincare_dists : numpy.array
All distances d(u,v) and d(u,v'), where v' is negative. Shape (1 + negative_size).
Returns
----------
max-margin loss function: \sum_{v' \in N(u)} max(0, \gamma + d(u,v) - d(u,v'))
"""
positive_term = poincare_dists[0]
negative_terms = poincare_dists[1:]
return grad_np.maximum(0, maxmargin_margin + positive_term - negative_terms).sum()
class PoincareBatch(DAGEmbeddingBatch):
"""Compute Poincare distances, gradients and loss for a training batch.
Class for computing Poincare distances, gradients and loss for a training batch,
and storing intermediate state to avoid recomputing multiple times.
"""
def __init__(self,
vectors_u, # (1, dim, batch_size)
vectors_v, # (1 + neg_size, dim, batch_size)
indices_u,
indices_v,
rels_reversed,
poincare_model):
super().__init__(
vectors_u=vectors_u,
vectors_v=vectors_v,
indices_u=indices_u,
indices_v=indices_v,
rels_reversed=rels_reversed,
dag_embedding_model=None)
self.gamma = None
self.poincare_dists = None
self.euclidean_dists = None
self._distances_computed = False
self._distance_gradients_computed = False
self.distance_gradients_u = None
self.distance_gradients_v = None
self.loss_type = poincare_model.loss_type
self.maxmargin_margin = poincare_model.maxmargin_margin
self.neg_r = poincare_model.neg_r
self.neg_t = poincare_model.neg_t
self.neg_mu = poincare_model.neg_mu
def _compute_distances(self):
"""Compute and store norms, euclidean distances and poincare distances between input vectors."""
if self._distances_computed:
return
self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1) # (1 + neg_size, batch_size)
self.gamma = 1 + 2 * ((self.euclidean_dists ** 2) / (self.one_minus_norms_sq_u * self.one_minus_norms_sq_v)) # (1 + neg_size,
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
helps['cosmosdb create'] = """
type: command
short-summary: Create a new Azure Cosmos DB database account.
parameters:
- name: --locations
short-summary: Add a location to the Cosmos DB database account
long-summary: |
Usage: --locations KEY=VALUE [KEY=VALUE ...]
Required Keys: regionName, failoverPriority
Optional Key: isZoneRedundant
Default: single region account in the location of the specified resource group.
Failover priority values are 0 for write regions and greater than 0 for read regions. A failover priority value must be unique and less than the total number of regions.
Multiple locations can be specified by using more than one `--locations` argument.
- name: --databases-to-restore
short-summary: Add a database and its collection names to restore
long-summary: |
Usage: --databases-to-restore name=DatabaseName collections=collection1 [collection2 ...]
examples:
- name: Create a new Azure Cosmos DB database account.
text: az cosmosdb create --name MyCosmosDBDatabaseAccount --resource-group MyResourceGroup --subscription MySubscription
- name: Create a new Azure Cosmos DB database account with two regions. UK South is zone redundant.
text: az cosmosdb create -n myaccount -g mygroup --locations regionName=eastus failoverPriority=0 isZoneRedundant=False --locations regionName=uksouth failoverPriority=1 isZoneRedundant=True --enable-multiple-write-locations
- name: Create a new Azure Cosmos DB database account by restoring from an existing account in the given location
text: az cosmosdb create -n restoredaccount -g mygroup --is-restore-request true --restore-source /subscriptions/2296c272-5d55-40d9-bc05-4d56dc2d7588/providers/Microsoft.DocumentDB/locations/westus/restorableDatabaseAccounts/d056a4f8-044a-436f-80c8-cd3edbc94c68 --restore-timestamp 2020-07-13T16:03:41+0000 --locations regionName=westus failoverPriority=0 isZoneRedundant=False
"""
helps['cosmosdb restore'] = """
type: command
short-summary: Create a new Azure Cosmos DB database account by restoring from an existing database account.
parameters:
- name: --databases-to-restore
short-summary: Add a database and its collection names to restore
long-summary: |
Usage: --databases-to-restore name=DatabaseName collections=collection1 [collection2 ...]
Multiple databases can be specified by using more than one `--databases-to-restore` argument.
examples:
- name: Create a new Azure Cosmos DB database account by restoring from an existing database account.
text: az cosmosdb restore --target-database-account-name MyRestoredCosmosDBDatabaseAccount --account-name MySourceAccount --restore-timestamp 2020-07-13T16:03:41+0000 -g MyResourceGroup --location westus
- name: Create a new Azure Cosmos DB database account by restoring only the selected databases and collections from an existing database account.
text: az cosmosdb restore -g MyResourceGroup --target-database-account-name MyRestoredCosmosDBDatabaseAccount --account-name MySourceAccount --restore-timestamp 2020-07-13T16:03:41+0000 --location westus --databases-to-restore name=MyDB1 collections=collection1 collection2 --databases-to-restore name=MyDB2 collections=collection3 collection4
"""
helps['cosmosdb update'] = """
type: command
short-summary: Update an Azure Cosmos DB database account.
parameters:
- name: --locations
short-summary: Add a location to the Cosmos DB database account
long-summary: |
Usage: --locations KEY=VALUE [KEY=VALUE ...]
Required Keys: regionName, failoverPriority
Optional Key: isZoneRedundant
Default: single region account in the location of the specified resource group.
Failover priority values are 0 for write regions and greater than 0 for read regions. A failover priority value must be unique and less than the total number of regions.
Multiple locations can be specified by using more than one `--locations` argument.
examples:
- name: Update an Azure Cosmos DB database account.
text: az cosmosdb update --capabilities EnableGremlin --name MyCosmosDBDatabaseAccount --resource-group MyResourceGroup
- name: Update an Azure Cosmos DB database account with two regions. UK South is zone redundant.
text: az cosmosdb update -n myaccount -g mygroup --locations regionName=eastus failoverPriority=0 isZoneRedundant=False --locations regionName=uksouth failoverPriority=1 isZoneRedundant=True --enable-multiple-write-locations
- name: Update the backup policy parameters of a database account with Periodic backup type.
text: az cosmosdb update -n myaccount -g mygroup --backup-interval 240 --backup-retention 24
"""
helps['cosmosdb restorable-database-account'] = """
type: group
short-summary: Manage restorable Azure Cosmos DB accounts.
"""
helps['cosmosdb restorable-database-account list'] = """
type: command
short-summary: List all the database accounts that can be restored.
"""
helps['cosmosdb restorable-database-account show'] = """
type: command
short-summary: Show the details of a database account that can be restored.
"""
helps['cosmosdb sql restorable-database'] = """
type: group
short-summary: Manage different versions of sql databases that are restorable in a Azure Cosmos DB account.
"""
helps['cosmosdb sql restorable-database list'] = """
type: command
short-summary: List all the versions of all the sql databases that were created / modified / deleted in the given restorable account.
"""
helps['cosmosdb sql restorable-container'] = """
type: group
short-summary: Manage different versions of sql containers that are restorable in a database of a Azure Cosmos DB account.
"""
helps['cosmosdb sql restorable-container list'] = """
type: command
short-summary: List all the versions of all the sql containers that were created / modified / deleted in the given database and restorable account.
"""
helps['cosmosdb sql restorable-resource'] = """
type: group
short-summary: Manage the databases and their containers that can be restored in the given account at the given timestamp and region.
"""
helps['cosmosdb sql restorable-resource list'] = """
type: command
short-summary: List all the databases and their containers that can be restored in the given account at the given timestamp and region.
"""
helps['cosmosdb mongodb restorable-database'] = """
type: group
short-summary: Manage different versions of mongodb databases that are restorable in a Azure Cosmos DB account.
"""
helps['cosmosdb mongodb restorable-database list'] = """
type: command
short-summary: List all the versions of all the mongodb databases that were created / modified / deleted in the given restorable account.
"""
helps['cosmosdb mongodb restorable-collection'] = """
type: group
short-summary: Manage different versions of mongodb collections that are restorable in a database of a Azure Cosmos DB account.
"""
helps['cosmosdb mongodb restorable-collection list'] = """
type: command
short-summary: List all the versions of all the mongodb collections that were created / modified / deleted in the given database and restorable account.
"""
helps['cosmosdb mongodb restorable-resource'] = """
type: group
short-summary: Manage the databases and their collections that can be restored in the given account at the given timestamp and region.
"""
helps['cosmosdb mongodb restorable-resource list'] = """
type: command
short-summary: List all the databases and their collections that can be restored in the given account at the given timestamp and region.
"""
helps['cosmosdb sql role'] = """
type: group
short-summary: Manage Azure Cosmos DB SQL role resources.
"""
helps['cosmosdb sql role definition'] = """
type: group
short-summary: Manage Azure Cosmos DB SQL role definitions.
"""
helps['cosmosdb sql role definition create'] = """
type: command
short-summary: Create a SQL role definition under an Azure Cosmos DB account.
examples:
- name: Create a SQL role definition under an Azure Cosmos DB account using a JSON string.
text: |
az cosmosdb sql role definition create --account-name MyAccount --resource-group MyResourceGroup --body '{
"Id": "be79875a-2cc4-40d5-8958-566017875b39",
"RoleName": "My Read Only Role",
"Type": "CustomRole",
"AssignableScopes": ["/dbs/mydb/colls/mycontainer"],
"Permissions": [{
"DataActions": [
"Microsoft.DocumentDB/databaseAccounts/readMetadata",
"Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers/items/read",
"Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers/executeQuery",
"Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers/readChangeFeed"
]
}]
}'
- name: Create a SQL role definition under an Azure Cosmos DB account using a JSON file.
text: az cosmosdb sql role definition create --account-name MyAccount --resource-group MyResourceGroup --body @role-definition.json
"""
helps['cosmosdb sql role definition delete'] = """
type: command
short-summary: Delete a SQL role definition under an Azure Cosmos DB account.
examples:
- name: Create a SQL role definition under an Azure Cosmos DB account.
text: az cosmosdb sql role definition delete --account-name MyAccount --resource-group MyResourceGroup --id be79875a-2cc4-40d5-8958-566017875b39
"""
helps['cosmosdb sql role definition exists'] = """
type: command
short-summary: Check if an Azure Cosmos DB role definition exists.
examples:
- name: Check if an Azure Cosmos DB role definition exists.
text: az cosmosdb sql role definition exists --account-name MyAccount --resource-group MyResourceGroup --id be79875a-2cc4-40d5-8958-566017875b39
"""
helps['cosmosdb sql role definition list'] = """
type: command
short-summary: List all SQL role definitions under an Azure Cosmos DB account.
examples:
- name: List all SQL role definitions under an Azure Cosmos DB account.
text: az cosmosdb sql role definition list --account-name MyAccount --resource-group MyResourceGroup
"""
helps['cosmosdb sql role definition show'] = """
type: command
short-summary: Show the properties of a SQL role definition under an Azure Cosmos DB account.
examples:
- name: Show the properties of a SQL role definition under an Azure Cosmos DB account.
text: az cosmosdb sql role definition show --account-name MyAccount --resource-group MyResourceGroup --id be79875a-2cc4-40d5-8958-566017875b39
"""
helps['cosmosdb sql role definition update'] = """
type: command
short-summary: Update a SQL role definition under an Azure Cosmos DB account.
examples:
- name: Update a SQL role definition under an Azure Cosmos DB account.
text: az cosmosdb sql role definition update --account-name MyAccount --resource-group MyResourceGroup --body @role-definition.json
"""
helps['cosmosdb sql role assignment'] = """
type: group
short-summary: Manage Azure Cosmos DB SQL role assignments.
"""
helps['cosmosdb sql role assignment create'] = """
type: command
short-summary: Create a SQL role assignment under an Azure Cosmos DB account.
examples:
- name: Create a SQL role assignment under an Azure Cosmos DB account.
text: |
az cosmosdb sql role assignment create --account-name | |
mask = None
else: # todo create faster fill func without masking
mask = com.mask_missing(transf(values), missing)
if method == 'pad':
com.pad_2d(transf(values), limit=limit, mask=mask)
else:
com.backfill_2d(transf(values), limit=limit, mask=mask)
return make_block(values, self.items, self.ref_items)
def take(self, indexer, axis=1):
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
new_values = com.take_nd(self.values, indexer, axis=axis,
allow_fill=False)
return make_block(new_values, self.items, self.ref_items)
def get_values(self, dtype):
return self.values
def diff(self, n):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=1)
return make_block(new_values, self.items, self.ref_items)
def shift(self, indexer, periods):
""" shift the block by periods, possibly upcast """
new_values = self.values.take(indexer, axis=1)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:, :periods] = fill_value
else:
new_values[:, periods:] = fill_value
return make_block(new_values, self.items, self.ref_items)
def eval(self, func, other, raise_on_error = True, try_cast = False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True (the default), raise when the function cannot be applied to the block values;
if False, suppress the error and return NaN-filled values instead
Returns
-------
a new block, the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
axis = getattr(other, '_het_axis', 0)
other = other.reindex_axis(self.items, axis=axis, copy=True).values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
values = values.T
is_transposed = True
values, other = self._try_coerce_args(values, other)
args = [ values, other ]
try:
result = self._try_coerce_result(func(*args))
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values [%s]'
% (repr(other),str(detail)))
else:
# return the values
result = np.empty(values.shape,dtype='O')
result.fill(np.nan)
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result, self.items, self.ref_items)
def where(self, other, cond, raise_on_error = True, try_cast = False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
raise_on_error : if True (the default), raise when the function cannot be applied to the block values;
if False, suppress the error and return NaN-filled values instead
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other,'reindex_axis'):
axis = getattr(other,'_het_axis',0)
other = other.reindex_axis(self.items, axis=axis, copy=True).values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond,'shape'):
raise ValueError("where must have a condition that is ndarray like")
if hasattr(cond,'reindex_axis'):
axis = getattr(cond,'_het_axis',0)
cond = cond.reindex_axis(self.items, axis=axis, copy=True).values
else:
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
# our where function
def func(c,v,o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(expressions.where(c, v, o, raise_on_error=True))
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values [%s]'
% (repr(o),str(detail)))
else:
# return the values
result = np.empty(v.shape,dtype='float64')
result.fill(np.nan)
return result
def create_block(result, items, transpose = True):
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if transpose and is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result, items, self.ref_items)
# see if we can operate on the entire block, or need item-by-item
if not self._can_hold_na:
axis = cond.ndim-1
result_blocks = []
for item in self.items:
loc = self.items.get_loc(item)
item = self.items.take([loc])
v = values.take([loc],axis=axis)
c = cond.take([loc],axis=axis)
o = other.take([loc],axis=axis) if hasattr(other,'shape') else other
result = func(c,v,o)
if len(result) == 1:
result = np.repeat(result,self.shape[1:])
result = _block_shape(result,ndim=self.ndim,shape=self.shape[1:])
result_blocks.append(create_block(result, item, transpose = False))
return result_blocks
else:
result = func(cond,values,other)
return create_block(result, self.items)
class NumericBlock(Block):
is_numeric = True
_can_hold_na = True
def _try_cast_result(self, result):
return _possibly_downcast_to_dtype(result, self.dtype)
class FloatBlock(NumericBlock):
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if isinstance(element, np.ndarray):
return issubclass(element.dtype.type, (np.floating, np.integer))
return isinstance(element, (float, int))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:,slicer]
values = np.array(values,dtype=object)
mask = isnull(values)
values[mask] = na_rep
if float_format:
imask = (~mask).ravel()
values.flat[imask] = np.array([ float_format % val for val in values.ravel()[imask] ])
return values.tolist()
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return issubclass(value.dtype.type, np.floating) and value.dtype == self.dtype
class ComplexBlock(NumericBlock):
def _can_hold_element(self, element):
return isinstance(element, complex)
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
_can_hold_na = False
def _can_hold_element(self, element):
if isinstance(element, np.ndarray):
return issubclass(element.dtype.type, np.integer)
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class BoolBlock(NumericBlock):
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
class ObjectBlock(Block):
is_object = True
_can_hold_na = True
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type object """
return lib.is_bool_array(self.values.ravel())
def convert(self, convert_dates = True, convert_numeric = True, copy = True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
blocks = []
for i, c in enumerate(self.items):
values = self.get(c)
values = com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric)
values = _block_shape(values)
items = self.items.take([i])
newb = make_block(values, items, self.ref_items)
blocks.append(newb)
return blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_))
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
class DatetimeBlock(Block):
_can_hold_na = True
def __init__(self, values, items, ref_items, ndim=2):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
Block.__init__(self, values, items, ref_items, ndim=ndim)
def _gi(self, arg):
return lib.Timestamp(self.values[arg])
def _can_hold_element(self, element):
return com.is_integer(element) or isinstance(element, datetime)
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments
we are going to compare vs i8, so coerce to integer
values is always ndarray-like, other may not be """
values = values.view('i8')
if isinstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif isnull(other):
other = tslib.iNaT
else:
other = other.view('i8')
return values, other
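# Illustrative sketch (added comment, not part of the original source): the
# coercion above views datetime64[ns] data as int64 so that comparisons (e.g.
# in where/putmask) operate on plain integers, and _try_coerce_result below
# maps integer results back to datetimes. Assuming nanosecond-resolution input:
#
#   np.array(['2013-01-01', '2013-01-02'], dtype='M8[ns]').view('i8')
#   # -> [1356998400000000000, 1357084800000000000]  (nanoseconds since the epoch)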
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype == 'i8':
result = tslib.array_to_datetime(result.astype(object).ravel()).reshape(result.shape)
elif isinstance(result, np.integer):
result = lib.Timestamp(result)
return result
def to_native_types(self, slicer=None, na_rep=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:,slicer]
mask = isnull(values)
rvalues = np.empty(self.shape,dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
if self.dtype == 'datetime64[ns]':
rvalues.flat[imask] = np.array([ Timestamp(val)._repr_base for val in values.ravel()[imask] ],dtype=object)
elif self.dtype == 'timedelta64[ns]':
rvalues.flat[imask] = np.array([ lib.repr_timedelta64(val) for val in values.ravel()[imask] ],dtype=object)
return rvalues.tolist()
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, item, value):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
loc = self.items.get_loc(item)
if value.dtype != _NS_DTYPE:
value = tslib.cast_to_nanoseconds(value)
self.values[loc] = value
def get_values(self, dtype):
if dtype == object:
flat_i8 = self.values.ravel().view(np.int64)
res = tslib.ints_to_pydatetime(flat_i8)
return res.reshape(self.values.shape)
return self.values
def | |
# inferelator_prior/motifs/_motif.py
import numpy as np
import pandas as pd
import warnings
import os
import shutil
import tempfile
import itertools
import pathos
from collections import Counter
from inferelator_prior.processor.bedtools import (extract_bed_sequence, intersect_bed, load_bed_to_bedtools,
BED_CHROMOSOME)
from inferelator_prior.processor.gtf import get_fasta_lengths, check_chromosomes_match
INFO_COL = "Information_Content"
ENTROPY_COL = "Shannon_Entropy"
OCC_COL = "Occurrence"
LEN_COL = "Length"
MOTIF_COL = "Motif_ID"
MOTIF_NAME_COL = "Motif_Name"
MOTIF_OBJ_COL = "Motif_Object"
MOTIF_CONSENSUS_COL = "Consensus"
MOTIF_ORIGINAL_NAME_COL = 'Motif_Name_Original'
SCAN_SCORE_COL = "Inferelator_Score"
SCORE_PER_BASE = "Per Base Array"
DEGEN_LOOKUP = {frozenset(("A", "T")): "W",
frozenset(("A", "C")): "M",
frozenset(("A", "G")): "R",
frozenset(("C", "G")): "S",
frozenset(("C", "T")): "Y",
frozenset(("G", "T")): "K",
frozenset("A"): "A",
frozenset("T"): "T",
frozenset("G"): "G",
frozenset("C"): "C"}
class Motif:
motif_id = None
motif_name = None
motif_url = None
_motif_probs = None
_motif_counts = None
_motif_prob_array = None
_motif_alphabet = None
_motif_background = None
_motif_species = None
_motif_accession = None
_alphabet_map = None
_consensus_seq = None
_consensus_seq_degen = None
_info_matrix = None
_homer_odds = None
@property
def alphabet(self):
return self._motif_alphabet
@alphabet.setter
def alphabet(self, new_alphabet):
if new_alphabet is not None:
self._motif_alphabet = np.array(new_alphabet)
self._alphabet_map = {ch.lower(): i for i, ch in enumerate(self._motif_alphabet)}
@property
def accession(self):
return self._motif_accession
@accession.setter
def accession(self, new_accession):
if new_accession is not None:
self._motif_accession = new_accession
@property
def id(self):
return self.motif_id
@id.setter
def id(self, new_id):
if new_id is not None:
self.motif_id = new_id
@property
def name(self):
return self.motif_name
@name.setter
def name(self, new_name):
if new_name is not None:
self.motif_name = new_name
@property
def alphabet_len(self):
return len(self._motif_alphabet)
@property
def background(self):
if self._motif_background is None:
self._motif_background = np.array([[1 / self.alphabet_len] * self.alphabet_len])
return self._motif_background
@property
def probability_matrix(self):
if self._motif_prob_array is None and len(self._motif_probs) == 0:
return None
if self._motif_prob_array is None or self._motif_prob_array.shape[0] < len(self._motif_probs):
self._motif_prob_array = np.array(self._motif_probs)
return self._motif_prob_array
@probability_matrix.setter
def probability_matrix(self, matrix):
self._motif_prob_array = matrix
@property
def count_matrix(self):
return np.array(self._motif_counts) if self._motif_counts is not None else None
@property
def shannon_entropy(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
# Calculate -1 * p log p and set to 0 where p is already 0
entropy = np.multiply(self.probability_matrix, np.log2(self.probability_matrix))
entropy[~np.isfinite(entropy)] = 0
entropy *= -1
return np.sum(entropy)
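# Worked example (added comment, not part of the original source): for one
# position with probabilities [0.5, 0.5, 0.0, 0.0] the nonzero entries give
# -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0 bit, while a fully certain position
# such as [1, 0, 0, 0] contributes 0 bits; shannon_entropy is the sum of this
# quantity over every position in the motif.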
@property
def information_content(self):
if self.probability_matrix is None:
return 0
return np.sum(self.ic_matrix)
@property
def homer_odds(self):
return self.threshold_ln_odds if self._homer_odds is None else self._homer_odds
@homer_odds.setter
def homer_odds(self, val):
self._homer_odds = val
@property
def ic_matrix(self):
if self.probability_matrix is None:
return None
if self._info_matrix is None or self._info_matrix.shape != self.probability_matrix.shape:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
# Calculate p log (p/background)
self._info_matrix = np.divide(self.probability_matrix, self.background.reshape(1, -1))
self._info_matrix = np.multiply(self.probability_matrix, np.log2(self._info_matrix))
self._info_matrix[~np.isfinite(self._info_matrix)] = 0.
self._info_matrix = np.maximum(self._info_matrix, 0.)
return self._info_matrix
@property
def expected_occurrence_rate(self):
return int(2 ** self.information_content)
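# Note (added comment, not part of the original source): since the rate is
# defined as int(2 ** information_content), a motif carrying exactly 10 bits of
# information yields expected_occurrence_rate == 1024, i.e. roughly one expected
# match per 1024 bases of uniform random background.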
@property
def consensus(self):
if self._consensus_seq is None:
self._consensus_seq = "".join(np.apply_along_axis(lambda x: self.alphabet[x.argmax()], axis=1,
arr=self.probability_matrix))
return self._consensus_seq
@property
def consensus_degen(self):
if self._consensus_seq_degen is None:
def _csdegen(x):
return DEGEN_LOOKUP[frozenset(self.alphabet[x >= 0.35])] if np.sum(x >= 0.35) > 0 else "N"
self._consensus_seq_degen = "".join(np.apply_along_axis(_csdegen, axis=1, arr=self.probability_matrix))
return self._consensus_seq_degen
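# Illustrative sketch (added comment, not part of the original source),
# assuming the usual ACGT alphabet: a column with probabilities
# [0.5, 0.0, 0.45, 0.05] keeps A and G above the 0.35 cutoff and maps to the
# degenerate IUPAC code "R" via DEGEN_LOOKUP, while a flat column such as
# [0.3, 0.3, 0.2, 0.2] keeps nothing above the cutoff and is written as "N".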
@property
def max_ln_odds(self):
max_ln_odd = np.log(np.amax(self.probability_matrix, axis=1) / 0.25)
return np.sum(max_ln_odd)
@property
def threshold_ln_odds(self):
second_prob = np.sort(self.probability_matrix, axis=1)[:, 2]
return self.max_ln_odds - max((np.sum(np.log(second_prob[second_prob > 0.25] / 0.25)), 0.1 * self.max_ln_odds))
@property
def species(self):
return self._motif_species
@species.setter
def species(self, new_species):
is_list = isinstance(new_species, (list, tuple))
if is_list and self._motif_species is None:
self._motif_species = new_species
elif is_list:
self._motif_species.extend(new_species)
elif self._motif_species is None:
self._motif_species = [new_species]
else:
self._motif_species.append(new_species)
def __len__(self):
return self.probability_matrix.shape[0] if self.probability_matrix is not None else 0
def __str__(self):
return "{mid} {mname}: Width {el} IC {ic:.2f} bits".format(mid=self.motif_id,
mname=self.motif_name,
el=len(self),
ic=self.information_content)
def __eq__(self, other):
try:
return np.allclose(self.probability_matrix, other.probability_matrix) \
and (self.motif_id == other.motif_id) and (self.motif_name == other.motif_name)
except AttributeError:
pass
try:
return self.motif_name == other
except TypeError:
pass
return False
def __init__(self, motif_id=None, motif_name=None, motif_alphabet=None, motif_background=None):
self.id = motif_id
self.name = motif_name
self.alphabet = np.array(motif_alphabet) if motif_alphabet is not None else None
self._motif_background = motif_background
self._motif_probs = []
def add_prob_line(self, line):
self._motif_probs.append(line)
def add_count_line(self, line):
if self._motif_counts is not None:
self._motif_counts.append(line)
else:
self._motif_counts = [line]
def score_match(self, match, disallow_homopolymer=True, homopolymer_one_off_len=6, score_zero_as_zero=None):
if len(match) != len(self):
msg = "Sequence length {l} not compatible with motif length {m}".format(l=len(match), m=len(self))
raise ValueError(msg)
# Score anything that's a homopolymer to 0 if the flag is set
if disallow_homopolymer and sum([m == match[0] for m in match]) == len(match):
return 0
# Score anything that's one base from a homopolymer to 0 if the flag is set
if disallow_homopolymer and (len(match) > homopolymer_one_off_len and
sum([min((c, 2)) for c in Counter(match).values()]) < 4):
return 0
# Score anything with excessive nucleotides that have a p ~ 0.0 as 0
if score_zero_as_zero is not None and sum(p < 0.001 for p in self._prob_match(match)) > score_zero_as_zero:
return 0
mse_ic = np.sum(np.square(np.subtract(self._info_match(self.consensus), self._info_match(match))))
return max((np.sum(self._info_match(match)) - mse_ic, 0.))
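# Illustrative usage sketch (added comment, not part of the original source);
# the motif id/name and probabilities below are made up for the example:
#
#   >>> m = Motif("M1", "example", ["A", "C", "G", "T"])
#   >>> for row in [[0.9, 0.05, 0.03, 0.02]] * 4:
#   ...     m.add_prob_line(row)
#   >>> m.consensus
#   'AAAA'
#   >>> m.score_match("AAAA")                                # homopolymer guard
#   0
#   >>> m.score_match("AAAA", disallow_homopolymer=False) > 0
#   True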
def truncate(self, threshold=0.35):
threshold = np.max(self.probability_matrix, axis=1) > threshold
keepers = (threshold.cumsum() > 0) & (threshold[::-1].cumsum()[::-1] > 0)
self.probability_matrix = self.probability_matrix[keepers, :]
self._motif_probs = list(itertools.compress(self._motif_probs, keepers))
def _prob_match(self, match):
return [self.probability_matrix[i, self._alphabet_map[ch.lower()]] for i, ch in enumerate(match)]
def _info_match(self, match):
return [self.ic_matrix[i, self._alphabet_map[ch.lower()]] for i, ch in enumerate(match)]
def species_contains(self, match_str):
if self.species is not None:
match_str = match_str.lower()
return any(match_str in s.lower() for s in self.species)
else:
return False
def shuffle(self, rng=None, random_seed=42):
"""
Shuffles per-base probabilities
"""
if rng is not None:
rng.shuffle(self.probability_matrix.T)
else:
np.random.default_rng(random_seed).shuffle(self.probability_matrix.T)
class MotifScanner:
scanner_name = None
def __init__(self, motif_file=None, motifs=None, num_workers=4):
if (motif_file is None and motifs is None) or (motif_file is not None and motifs is not None):
raise ValueError("Exactly one of motif_file or motifs must be passed")
self.motif_file = motif_file
self.motifs = motifs
self.num_workers = num_workers
def scan(self, genome_fasta_file=None, constraint_bed_file=None, promoter_bed=None, min_ic=None, threshold=None,
valid_fasta_chromosomes=None, debug=False, extracted_genome=None):
"""
"""
# Preprocess motifs into a list of temp chunk files
motif_files = self._preprocess(min_ic=min_ic)
# Unpack list to a dict for convenience
self.motifs = {mot.motif_id: mot for mot in self.motifs}
try:
if extracted_genome is None:
extracted_fasta_file = self.extract_genome(genome_fasta_file, constraint_bed_file, promoter_bed,
valid_fasta_chromosomes, debug)
try:
motif_data = self._scan_extract(motif_files, extracted_fasta_file, threshold=threshold)
return self._postprocess(motif_data)
finally:
try:
os.remove(extracted_fasta_file)
except FileNotFoundError:
pass
else:
motif_data = self._scan_extract(motif_files, extracted_genome, threshold=threshold)
return self._postprocess(motif_data)
finally:
for file in motif_files:
try:
os.remove(file)
except FileNotFoundError:
pass
@staticmethod
def extract_genome(genome_fasta_file, constraint_bed_file=None, promoter_bed=None,
valid_fasta_chromosomes=None, debug=False):
if valid_fasta_chromosomes is None:
_chr_lens = get_fasta_lengths(genome_fasta_file)
valid_fasta_chromosomes = list(_chr_lens.keys())
con_bed_file = load_bed_to_bedtools(constraint_bed_file) if constraint_bed_file is not None else None
pro_bed_file = load_bed_to_bedtools(promoter_bed) if promoter_bed is not None else None
if con_bed_file is not None and valid_fasta_chromosomes is not None:
check_chromosomes_match(con_bed_file.to_dataframe(), valid_fasta_chromosomes,
chromosome_column=BED_CHROMOSOME, file_name=constraint_bed_file)
if debug:
MotifScanner._print_bed_summary(con_bed_file, constraint_bed_file)
if pro_bed_file is not None and valid_fasta_chromosomes is not None:
check_chromosomes_match(pro_bed_file.to_dataframe(), valid_fasta_chromosomes,
chromosome_column=BED_CHROMOSOME, file_name=pro_bed_file)
if debug:
MotifScanner._print_bed_summary(pro_bed_file, promoter_bed)
if con_bed_file is not None and pro_bed_file is not None:
bed_file = intersect_bed(load_bed_to_bedtools(constraint_bed_file), load_bed_to_bedtools(promoter_bed))
elif con_bed_file is not None:
bed_file = con_bed_file
elif pro_bed_file is not None:
bed_file = pro_bed_file
else:
extracted_fasta_file = tempfile.mkstemp(suffix=".fasta")[1]
shutil.copy2(genome_fasta_file, extracted_fasta_file)
return extracted_fasta_file
extracted_fasta_file = extract_bed_sequence(bed_file, genome_fasta_file)
return extracted_fasta_file
def _scan_extract(self, motif_files, extracted_fasta_file, threshold=None, parse_genomic_coord=True):
# If the number of workers is 1, run fimo directly
if (self.num_workers == 1) or (len(motif_files) == 1):
assert len(motif_files) == 1
return self._get_motifs(extracted_fasta_file, motif_files[0], threshold=threshold,
parse_genomic_coord=parse_genomic_coord)
# Otherwise parallelize with a process pool (pathos because dill will do local functions)
else:
# Convenience local function
n = len(motif_files)
def _get_chunk_motifs(i, chunk_file):
print("Launching {name} scanner [{i} / {n}]".format(name=self.scanner_name, i=i + 1, n=n))
results = self._get_motifs(extracted_fasta_file, chunk_file, threshold=threshold,
parse_genomic_coord=parse_genomic_coord)
print("Scanning completed [{i} / {n}]".format(i=i + 1, n=n))
return results
with pathos.multiprocessing.Pool(self.num_workers) as pool:
motif_data = [data for data in pool.starmap(_get_chunk_motifs, enumerate(motif_files))]
motif_data = pd.concat(motif_data)
return motif_data
def _preprocess(self, min_ic=None):
raise NotImplementedError
def _postprocess(self, motif_peaks):
raise NotImplementedError
def _get_motifs(self, fasta_file, motif_file, threshold=None, parse_genomic_coord=True):
raise NotImplementedError
def _parse_output(self, output_handle):
raise NotImplementedError
@staticmethod
def _print_bed_summary(bedtools_obj, bed_file_name):
print("BED File ({f}) parsing complete:".format(f=bed_file_name))
for chromosome, ct in bedtools_obj.to_dataframe()[BED_CHROMOSOME].value_counts().items():
print("\tChromosome {c}: {n} intervals found".format(c=chromosome, n=ct))
def motifs_to_dataframe(motifs):
entropy = list(map(lambda x: x.shannon_entropy, motifs))
occurrence = list(map(lambda x: x.expected_occurrence_rate, motifs))
info = list(map(lambda x: x.information_content, motifs))
ids = list(map(lambda x: x.motif_id, motifs))
names = list(map(lambda x: x.motif_name, motifs))
cons = list(map(lambda x: x.consensus_degen, motifs))
df = pd.DataFrame(
zip(ids, names, info, entropy, occurrence, list(map(lambda x: len(x), motifs)), motifs, cons),
index=list(map(lambda x: x.motif_name, motifs)),
columns=[MOTIF_COL, MOTIF_NAME_COL, INFO_COL, ENTROPY_COL, OCC_COL, LEN_COL, MOTIF_OBJ_COL, MOTIF_CONSENSUS_COL]
)
return df
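# Illustrative usage sketch (added comment, not part of the original source),
# reusing the hypothetical motif `m` sketched after score_match above:
#
#   >>> df = motifs_to_dataframe([m])
#   >>> df.index.tolist()          # the frame is indexed by motif name
#   ['example']
#   >>> df.loc['example', MOTIF_CONSENSUS_COL]
#   'AAAA'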
def select_motifs(motifs, regulator_constraint_list):
"""
Keep only motifs for TFs in a list. Case-insensitive.
:param motifs: A list of motif objects
:type motifs: list[Motif]
:param | |
# -*- coding: utf-8 -*-
"""
Javelin Web2Py Admin Controller
"""
# metadata
__author__ = "<NAME>"
__copyright__ = "(c) 2013, Jacobson and Varni, LLC"
__date__ = "7/12/2013"
__email__ = "<EMAIL>"
__data__ = {'name' : 'jadmin', 'label' : 'Admin', 'description' : 'Only accessible to admins',
'icon' : 'briefcase', 'u-icon' : u'\uf0b1', 'color':'orange', 'required' : True}
import time
from datetime import datetime
from applications.javelin.ctr_data import ctr_enabled, get_ctr_data
from gluon.contrib import simplejson as json
from gluon.tools import Service
from gluon.storage import Storage
service = Service(globals())
DOC_TYPES = Storage(
CALLSLIP=Storage(value=0, label="Call Slips"),
ATTSHEETS=Storage(value=1, label="Attendance Sheets"),
NAMETAGS=Storage(value=2, label="Nametags")
)
@auth.requires_login()
@auth.requires_membership('admin')
def index():
"""Loads the index page for the 'Admin' controller
:returns: a dictionary to pass to the view with the list of ctr_enabled and the active module ('admin')
"""
ctr_data = get_ctr_data()
users = db().select(db.auth_user.ALL)
approvals = db(db.auth_user.registration_key=='pending').select(db.auth_user.ALL)
return dict(ctr_enabled=ctr_enabled, ctr_data=ctr_data, active_module='jadmin', users=users, approvals=approvals, doctypes=DOC_TYPES)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def create_doc(doctype, data):
logger.debug("CREATE DOC CALLED")
import StringIO
from reportlab.platypus import SimpleDocTemplate, Paragraph, Table, TableStyle, Image, Spacer
from reportlab.platypus.flowables import PageBreak
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.enums import TA_CENTER, TA_LEFT
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import inch
from reportlab.lib import colors
io = StringIO.StringIO()
doc = SimpleDocTemplate(io, pagesize=letter,
rightMargin=0.18*inch, leftMargin=0.18*inch, topMargin=0.18*inch, bottomMargin=0)
elements = list()
doctype = int(doctype)
if data: data = json.loads(data)
if doctype == DOC_TYPES.CALLSLIP.value:
doc_title = "Call_Slips"
people = data['people']
message = data['message']
persons = list()
for p in people:
if p.startswith('group_'):
group = db(db.group_rec.group_id==p.replace('group_', '')).select(db.person.id,
join=db.group_rec.on(db.person.id==db.group_rec.person_id))
for g in group:
if g.id not in persons:
persons.append(g.id)
elif p.startswith('grade_'):
grade = db(db.person.grade==p.replace('grade_', '')).select(db.person.id)
for g in grade:
if g.id not in persons:
persons.append(g.id)
elif p == 'all_leaders':
leaders = db(db.person.leader==True).select(db.person.id)
for l in leaders:
if l.id not in persons:
persons.append(l.id)
elif p == 'all_people':
allpeople = db().select(db.person.id)
for a in allpeople:
if a.id not in persons:
persons.append(a.id)
else:
if p not in persons:
persons.append(p)
people = [Storage(id=pid, last_name=db(db.person.id==pid).select(db.person.last_name).first().last_name,
first_name=db(db.person.id==pid).select(db.person.first_name).first().first_name,
courses=['{}: {}'.format(c.period, c.room) for c in db().select(db.course.period, db.course.room,
join=db.course_rec.on((db.course.id==db.course_rec.course_id) & (db.course_rec.student_id==pid)),
orderby=db.course.period)]
) for pid in persons]
i = 0
centerStyle = ParagraphStyle(name='Center', alignment=TA_CENTER)
leftStyle = ParagraphStyle(name='Left', alignment=TA_LEFT)
tableStyle = TableStyle([('VALIGN',(0,0),(-1,-1),'TOP'),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black)])
page = list()
for person in people:
page.append([Paragraph("<para alignment='left'><br></para>" +\
"<para alignment='center'><font face='Times-Bold' size=16>Vintage Crusher Crew</font><br><br><br></para>" +\
"<para alignment='left'><font face='Times' size=14><b>Name:</b> {} {}</font><br><br></para>".format(person.first_name, person.last_name) +\
"<para alignment='left'><font face='Times' size=12><b>Rooms:</b> {}</font><br><br></para>".format(', '.join(person.courses)) +\
"<para alignment='left'><font face='Times' size=12><b>Message:</b></font><br></para>" +\
"<para alignment='left'><font face='Times' size=12>{}</font></para>".format(message), leftStyle)])
i = (i+1)%4
if i == 0:
table = Table(page, colWidths=[8*inch], rowHeights=[2.5*inch]*len(page))
table.setStyle(tableStyle)
elements.append(table)
elements.append(PageBreak())
page = list()
elif doctype == DOC_TYPES.ATTSHEETS.value:
pass
elif doctype == DOC_TYPES.NAMETAGS.value:
people = data['people']
event_name = data['event_name']
events = data['events']
present = data['present']
persons = list()
for p in people:
if p.startswith('group_'):
group = db(db.group_rec.group_id==p.replace('group_', '')).select(db.person.id,
join=db.group_rec.on(db.person.id==db.group_rec.person_id))
for g in group:
if g.id not in persons:
persons.append(g.id)
elif p.startswith('grade_'):
grade = db(db.person.grade==p.replace('grade_', '')).select(db.person.id)
for g in grade:
if g.id not in persons:
persons.append(g.id)
elif p == 'all_leaders':
leaders = db(db.person.leader==True).select(db.person.id)
for l in leaders:
if l.id not in persons:
persons.append(l.id)
elif p == 'all_people':
allpeople = db().select(db.person.id)
for a in allpeople:
if a.id not in persons:
persons.append(a.id)
else:
if p not in persons:
persons.append(p)
centerStyle = ParagraphStyle(name='Center', alignment=TA_CENTER)
leftStyle = ParagraphStyle(name='Left', alignment=TA_LEFT)
tableStyle = TableStyle([('VALIGN',(0,-1),(-1,-1),'TOP')])
label_num = 0
row_num = 0
labels = list()
for pid in persons:
row = db(db.person.id==pid).select(db.person.ALL).first()
label = list()
if label_num == 2:
table = Table([labels], colWidths=[4*inch,0.14*inch,4*inch], rowHeights=[2*inch]*(len(labels)/2))
table.setStyle(tableStyle)
elements.append(table)
label_num = 0
labels = list()
row_num += 1
if row_num == 5:
row_num = 0
elements.append(PageBreak())
header = Paragraph("<font face='Times-Bold' size=11>{} {}</font>".format(year, event_name), centerStyle)
label.append(header)
label.append(Spacer(1,11))
firstName = Paragraph("<font face='Times-Bold' size=18>{}</font>".format(row.first_name), centerStyle)
label.append(firstName)
label.append(Spacer(1, 11))
lastName = Paragraph("<font face='Times-Roman' size=11>{}</font>".format(row.last_name), centerStyle)
label.append(lastName)
label.append(Spacer(1,20))
# if row.crew.wefsk != '' or row.crew.wefsk != None or row.crew.wefsk != 'N/A':
# try:
# rooms = rotation(row.crew.wefsk.split('-')[0], row.crew.wefsk.split('-')[1])
# except:
# rooms = 'N/A'
# else:
# rooms = 'N/A'
label.append(Paragraph("<font face='Times-Roman' size=11>ID#: {}</font>".format(row.student_id), leftStyle))
label.append(Paragraph("<font face='Times-Roman' size=11>Crew #: {}</font>".format(row.crew), leftStyle))
# label.append(Paragraph("<font face='Times-Roman' size=11>Crew Room: {}</font>".format(row.crew.room), leftStyle))
# label.append(Paragraph("<font face='Times-Roman' size=11>W.E.F.S.K. Rotation: {}</font>".format(rooms), leftStyle))
labels.append(label)
if label_num == 0:
labels.append(Spacer(14, 144))
label_num += 1
doc_title = '_'.join(event_name.split())
doc.build(elements)
io.seek(0)
now = datetime.now().strftime('%Y-%m-%d')
filename = "{}_{}_{}.pdf".format(doc_title, now, int(time.time()))
file_id = db.file.insert(name=filename, file=db.file.file.store(io, filename))
db_file = db.file(file_id).file
return dict(filename=db_file)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def update_names(names):
names = json.loads(names)
response = []
for name in names:
r = db.module_names.update_or_insert(name=name['name'], label=name['value'])
response.append(r)
errors = list()
for i in range(len(response)):
if response[i] == 0:
errors.append(names[i])
return dict(errors=errors)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def approve_user(id):
response = db(db.auth_user.id==id).update(registration_key='')
return dict(response=response)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def disapprove_user(id):
response = db(db.auth_user.id==id).delete()
return dict(response=response)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def import_from_csv(csv_file):
"""Imports records into the database from a CSV file
:param csv_file: the CSV text to import; the first row must contain column headers matching db.person fields, including "id"
:returns: a dictionary holding the per-record update_or_insert results
"""
response = list()
lines = csv_file.rstrip().splitlines()
if len(lines) > 0:
columns = lines.pop(0).split(',')
for i in range(len(columns)):
columns[i] = '_'.join(columns[i].lower().split())
for line in lines:
record = dict()
line = line.split(',')
for i in range(len(line)):
record[columns[i]] = line[i]
record = dict((k,v) for k,v in record.items() if k in db.person.fields)
response.append(db.person.update_or_insert(db.person.id==record['id'], **record))
return dict(response=response)
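# Illustrative input sketch (added comment, not part of the original controller):
# the first row supplies column headers, which are lower-cased and joined with
# underscores before being matched against db.person fields; an "id" column is
# required because each row is matched on db.person.id. Field names and values
# below are hypothetical:
#
#   Id,Last Name,First Name,Grade
#   1001,Smith,Jane,10
#   1002,Jones,Bob,11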
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def import_from_query(csv_file, leaders):
"""Imports records into the database from a CSV file (in the form of the queries from VHS)
:param csv_file: the CSV text to import (in the form of the queries from VHS)
:param leaders: "true" if the rows describe leaders rather than students
:returns: a dictionary with a response, either a 0 or 1, depending on success
"""
import csv
import StringIO
leaders = True if leaders=="true" else False
def phone_format(n):
try:
return format(int(n[:-1]), ",").replace(",", "-") + n[-1]
except:
return None
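# Worked example (added comment, not part of the original controller): all but
# the last digit are formatted with thousands separators and the commas swapped
# for dashes, so phone_format("7075551234") returns "707-555-1234"; anything
# that cannot be parsed as an integer falls through to None.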
if not leaders:
file_string = StringIO.StringIO(csv_file)
lines = list(csv.reader(file_string, skipinitialspace=True))
del file_string
del csv_file
# INSERT STUDENTS
student_ids = list()
teacher_ids = list()
course_ids = list()
columns = lines.pop(0)
while len(lines) > 0:
record = dict()
line = lines.pop(0)
student_id = line[columns.index('student_id')]
teacher_id = line[columns.index('teacher_id')]
course_id = line[columns.index('course_id')]
if student_id and student_id not in student_ids:
student_ids.append(student_id)
for i in range(len(line)):
record[columns[i]] = line[i]
record = dict((k,v) for k,v in record.items() if k in db.person.fields)
if record.get('cell_phone', None):
record['cell_phone'] = phone_format(record['cell_phone'])
if record.get('home_phone', None):
record['home_phone'] = phone_format(record['home_phone'])
db.person.update_or_insert(db.person.student_id==student_id, **record)
if teacher_id and teacher_id not in teacher_ids:
teacher_ids.append(teacher_id)
db.teacher.update_or_insert(db.teacher.teacher_id==teacher_id, **{
'teacher_id':line[columns.index('teacher_id')],
'teacher_name':line[columns.index('teacher_name')]})
if course_id and teacher_id and course_id not in course_ids:
course_ids.append(course_id)
teacher = db(db.teacher.teacher_id==teacher_id).select(db.teacher.id).first()
if teacher:
db.course.update_or_insert(db.course.course_id==course_id, **{
'course_id':line[columns.index('course_id')],
'code':line[columns.index('course_code')],
'title':line[columns.index('course_title')],
'period':line[columns.index('period')],
'room':line[columns.index('room')],
'teacher_id':teacher.id})
if course_id and student_id:
course = db(db.course.course_id==course_id).select().first()
student = db(db.person.student_id==student_id).select().first()
if course and student:
db.course_rec.update_or_insert((db.course_rec.course_id==course.id) &
(db.course_rec.student_id==student.id),
course_id=course.id,
student_id=student.id)
db.commit()
del record
del line
return dict(response=True)
else:
errors = list()
lines = list(csv.reader(StringIO.StringIO(csv_file), skipinitialspace=True))
columns = lines.pop(0)
short_tasks = {
'Team Sacrifice (Must have a car and willingness to work later than others)' : 'Team Sacrifice',
"Peer Support (Must be enrolled in Mr. Ward's Psychology or Peer Support class)" : 'Peer Support',
"Tutor/Study Buddy (Academic credits are available for this option)" : 'Tutor/Study Buddy',
"Database Manager (Must know Excel, Mail merge, and other technologies)" : 'Database Manager',
"Facebook Maintenance (You are responsible for up keeping on our page. Must be a FB addict)" : "Facebook Maintenance",
"Fundraising Team" : "Fundraising Team",
"TAs (Work with freshmen and Mr. Varni, Mr. Ward, or Mrs. Housley during the school day (Academic credits are available for this option)": "TAs",
"Posters & Propaganda" : "Posters & Propaganda",
"Public Outreach (Attend Parent Night, Back-to-School, other public events)" : 'Public Outreach',
"ASB Support (Those enrolled in 4th period Leadership class should check this option, but others are welcome as well)" : "ASB Support",
"L.O.C.s (Loyal Order of the Crushers. Attend home athletic and extracurricular events)": "L.O.C.s",
"Dirty 30 (Explain various aspects of high school culture to freshmen on Orientation Day afternoon)" : "Dirty 30",
"Set-up (Room Mapping) and Clean-up (Orientation Day only)": "Set-up and Clean-up",
"Homecoming Parade (Dress up and ride on our float! Easy!)" : "Homecoming Parade",
"Security/Safety (Helps keep freshmen in line; works with Peer Support on Orientation Day)": "Security/Safety",
"Food Prep & Clean-up (Orientation Day only)": "Food Prep & Clean-up",
"Fashion (Make costumes for House Hotties and Homecoming Parade)" : "Fashion",
'Burgundy Beauties and Golden Guns (Formerly "House Hotties")' : "Burgundy Beauties and Golden Guns",
"Audio-Visual (Responsible for music and videos during Orientation)" : "Audio-Visual",
"A-Team (Alumni only)": "A-Team"
}
task_teams = [task.name for task in db().select(db.groups.name)]
for line in lines:
record = dict()
for i in range(len(line)):
if columns[i] == 'last_name' or columns[i] == 'first_name':
line[i] = line[i].capitalize()
record[columns[i]] = line[i]
record = dict((k,v) for k,v in record.items() if k in db.person.fields)
if record.get('cell_phone', None):
record['cell_phone'] = phone_format(record['cell_phone'])
try:
person = db((db.person.last_name==record['last_name']) &
(db.person.first_name==record['first_name'])).select(db.person.ALL).first()
if person:
person_id = person.id
db(db.person.id==person_id).update(**record)
db(db.person.id==person_id).update(leader=True)
aTasks = line[columns.index('a_tasks')].split(',')
bTasks = line[columns.index('b_tasks')].split(',')
cTasks = line[columns.index('c_tasks')].split(',')
tasks_to_add = list()
for task in aTasks:
if task not in task_teams and task in short_tasks.values():
task_id = db.groups.insert(name=task)
tasks_to_add.append(task_id)
task_teams.append(task)
elif task in task_teams and task in short_tasks.values():
task_row = db(db.groups.name==task).select().first()
if task_row:
task_id = task_row.id
tasks_to_add.append(task_id)
for task in bTasks:
if task not in task_teams and task in short_tasks.values():
task_id = db.groups.insert(name=task)
tasks_to_add.append(task_id)
task_teams.append(task)
elif task in task_teams and task in short_tasks.values():
task_row = db(db.groups.name==task).select().first()
if task_row:
task_id = task_row.id
tasks_to_add.append(task_id)
for task in cTasks:
if task not in task_teams and task in short_tasks.values():
task_id = db.groups.insert(name=task)
tasks_to_add.append(task_id)
task_teams.append(task)
elif task in task_teams and task in short_tasks.values():
task_row = db(db.groups.name==task).select().first()
if task_row:
task_id = task_row.id
tasks_to_add.append(task_id)
for task in tasks_to_add:
if not db((db.group_rec.group_id==task) & (db.group_rec.person_id==person_id)).select().first():
db.group_rec.insert(group_id=task, person_id=person_id)
except:
errors.append(record['last_name'] + ", " + record['first_name'])
return dict(errors=errors)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def get_person_group_data(query=None):
if query:
qlist = query.split()
query = query.lower()
students = db(((db.person.last_name.contains(qlist, all=True)) |
(db.person.first_name.contains(qlist, all=True))) ).select(
db.person.id, db.person.last_name, db.person.first_name,
orderby=db.person.last_name|db.person.first_name).as_list()
allfields = [{'text': 'All', 'children':[d for d in [{'id':'all_people', 'last_name':'All Students', 'first_name' : ''},
{'id':'all_leaders', 'last_name':'All Leaders', 'first_name' : ''}] if query in d['last_name'].lower()]}]
allfields = [] if not allfields[0]['children'] else allfields
gradefields = [{'text': 'By Grade', 'children':[d for d in [{'id':'grade_9', 'last_name': 'Freshmen', 'first_name': ''},
{'id':'grade_10', 'last_name': 'Sophomores', 'first_name': ''},
{'id':'grade_11', 'last_name': 'Juniors', 'first_name': ''},
{'id':'grade_12', 'last_name': 'Seniors', 'first_name': ''}] if query in d['last_name'].lower()]}]
gradefields = [] if not gradefields[0]['children'] else gradefields
taskteams = [{'text': 'Task Teams', 'children': [{'id':'group_' + str(g.id),
'last_name': g.name,
'first_name':''}
for g in db(db.groups.name.contains(qlist)).select(db.groups.ALL, orderby=db.groups.name)]}]
taskteams = [] if not taskteams[0]['children'] else taskteams
students = [] if not students else [{'text': 'Students', 'children':students}]
people = allfields +\
gradefields +\
taskteams +\
students
else:
students = db().select(db.person.id, db.person.last_name, db.person.first_name,
orderby=db.person.last_name|db.person.first_name).as_list()
people = [{'text': 'All', | |
from http.server import HTTPServer, CGIHTTPRequestHandler
from socketserver import TCPServer
import os
import webbrowser
import multiprocessing
import sqlite3
import urllib.parse
import json
import sys
import argparse
import imp
import yaml
import re
from cravat import ConfigLoader
from cravat import admin_util as au
from cravat import CravatFilter
def get (handler):
head = handler.trim_path_head()
if head == 'service':
serve_service(handler)
elif head == 'widgetfile':
serve_widgetfile(handler)
elif head == 'runwidget':
serve_runwidget(handler)
else:
handler.request_path = head + '/' + handler.request_path
handler.request_path = handler.request_path.rstrip('/')
filepath = get_filepath(handler.request_path)
serve_view(handler, filepath)
### files ###
def get_filepath (path):
filepath = os.sep.join(path.split('/'))
filepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'webviewer',
filepath
)
return filepath
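# Note (added comment, not part of the original module): the request path is
# mapped onto the packaged "webviewer" directory, so a request for
# "css/style.css" resolves to <this module's directory>/webviewer/css/style.css
# (the path shown is illustrative; the prefix depends on where cravat is installed).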
def serve_view (handler, filepath):
handler.send_response(200)
if filepath[-4:] == '.css':
handler.send_header('Content-type', 'text/css')
elif filepath[-3:] == '.js':
handler.send_header('Content-type', 'application/javascript')
elif filepath[-4:] == '.png':
handler.send_header('Content-type', 'image/png')
elif filepath[-4:] == '.jpg':
handler.send_header('Content-type', 'image/jpg')
elif filepath[-4:] == '.gif':
handler.send_header('Content-type', 'image/gif')
else:
handler.send_header('Content-type', 'text/html')
handler.end_headers()
with open(filepath, 'rb') as f:
response = f.read()
handler.wfile.write(response)
### service ###
def serve_service (handler):
head = handler.trim_path_head()
queries = handler.request_queries
handler.send_response(200)
handler.send_header('Content-type', 'application/json')
handler.end_headers()
if head == 'variantcols':
content = get_variant_cols(queries)
#if head == 'conf':
# content = get_webviewerconf()
elif head == 'getsummarywidgetnames':
content = get_summary_widget_names(queries)
elif head == 'getresulttablelevels':
content = get_result_levels(queries)
elif head == 'result':
content = get_result(queries)
elif head == 'count':
content = get_count(queries)
elif head == 'widgetlist':
content = get_widgetlist()
elif head == 'status':
content = get_status(queries)
elif head == 'savefiltersetting':
content = save_filter_setting(queries)
elif head == 'savelayoutsetting':
content = save_layout_setting(queries)
elif head == 'loadfiltersetting':
content = load_filtersetting(queries)
elif head == 'loadlayoutsetting':
content = load_layout_setting(queries)
elif head == 'deletelayoutsetting':
content = delete_layout_setting(queries)
elif head == 'renamelayoutsetting':
content = rename_layout_setting(queries)
elif head == 'getlayoutsavenames':
content = get_layout_save_names(queries)
elif head == 'getfiltersavenames':
content = get_filter_save_names(queries)
elif head == 'getnowgannotmodules':
content = get_nowg_annot_modules(queries)
handler.response = bytes(json.dumps(content), 'UTF-8')
handler.wfile.write(handler.response)
def get_nowg_annot_modules (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
conn = sqlite3.connect(dbpath)
cursor = conn.cursor()
wgmodules = au.get_local_module_infos_of_type('webviewerwidget')
annot_modules_with_wg = []
for wgmodule in wgmodules:
conf = wgmodules[wgmodule].conf
if 'required_annotator' in conf:
if wgmodule not in annot_modules_with_wg:
annot_modules_with_wg.append(wgmodule)
nowg_annot_modules = {}
if table_exists(cursor, 'variant'):
q = 'select name, displayname from variant_annotator'
cursor.execute(q)
for r in cursor.fetchall():
m = r[0]
if m in ['example_annotator', 'testannot', 'tagsampler']:
continue
annot_module = 'wg' + r[0]
displayname = r[1]
if annot_module not in annot_modules_with_wg and annot_module not in nowg_annot_modules:
nowg_annot_modules[annot_module] = displayname
content = nowg_annot_modules
return content
def get_filter_save_names (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
conn = sqlite3.connect(dbpath)
cursor = conn.cursor()
table = 'viewersetup'
if table_exists(cursor, table) == False:
content = []
else:
q = 'select distinct name from ' + table + ' where datatype="filter"'
cursor.execute(q)
r = cursor.fetchall()
content = str([v[0] for v in r])
cursor.close()
conn.close()
return content
def get_layout_save_names (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
conn = sqlite3.connect(dbpath)
cursor = conn.cursor()
table = 'viewersetup'
content = []
if table_exists(cursor, table):
q = 'select distinct name from ' + table + ' where datatype="layout"'
cursor.execute(q)
r = cursor.fetchall()
content = [v[0] for v in r]
cursor.close()
conn.close()
return content
def rename_layout_setting (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
name = urllib.parse.unquote(queries['name'][0])
new_name = urllib.parse.unquote(queries['newname'][0])
conn = sqlite3.connect(dbpath)
cursor = conn.cursor()
table = 'viewersetup'
if table_exists(cursor, table) == True:
q = 'update ' + table + ' set name="' + new_name + '" where datatype="layout" and name="' + name + '"'
cursor.execute(q)
conn.commit()
cursor.close()
conn.close()
content = {}
return content
def delete_layout_setting (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
name = urllib.parse.unquote(queries['name'][0])
conn = sqlite3.connect(dbpath)
cursor = conn.cursor()
table = 'viewersetup'
if table_exists(cursor, table) == True:
q = 'DELETE FROM ' + table + ' WHERE datatype="layout" and name="' + name + '"'
cursor.execute(q)
conn.commit()
cursor.close()
conn.close()
content = {}
return content
def load_layout_setting (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
name = urllib.parse.unquote(queries['name'][0])
conn = sqlite3.connect(dbpath)
cursor = conn.cursor()
table = 'viewersetup'
if table_exists(cursor, table) == False:
content = {"widgetSettings": []}
else:
q = 'select viewersetup from ' + table + ' where datatype="layout" and name="' + name + '"'
cursor.execute(q)
r = cursor.fetchone()
if r != None:
data = r[0]
content = json.loads(data)
else:
content = {"widgetSettings": []}
cursor.close()
conn.close()
return content
def load_filtersetting (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
name = urllib.parse.unquote(queries['name'][0])
conn = sqlite3.connect(dbpath)
cursor = conn.cursor()
table = 'viewersetup'
if table_exists(cursor, table) == False:
content = {"filterSet": []}
else:
q = 'select viewersetup from ' + table + ' where datatype="filter" and name="' + name + '"'
cursor.execute(q)
r = cursor.fetchone()
if r != None:
data = r[0]
content = json.loads(data)
else:
content = {"filterSet": []}
cursor.close()
conn.close()
return content
def save_layout_setting (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
name = urllib.parse.unquote(queries['name'][0])
savedata = urllib.parse.unquote(queries['savedata'][0])
conn = sqlite3.connect(dbpath)
cursor = conn.cursor()
table = 'viewersetup'
if table_exists(cursor, table) == False:
q = 'create table ' + table + ' (datatype text, name text, viewersetup text, unique (datatype, name))'
cursor.execute(q)
q = 'replace into ' + table + ' values ("layout", "' + name + '", \'' + savedata + '\')'
cursor.execute(q)
conn.commit()
cursor.close()
conn.close()
content = 'saved'
return content
def save_filter_setting (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
name = urllib.parse.unquote(queries['name'][0])
savedata = urllib.parse.unquote(queries['savedata'][0])
conn = sqlite3.connect(dbpath)
cursor = conn.cursor()
table = 'viewersetup'
if table_exists(cursor, table) == False:
q = 'create table ' + table + ' (datatype text, name text, viewersetup text, unique (datatype, name))'
cursor.execute(q)
q = 'replace into ' + table + ' values ("filter", "' + name + '", \'' + savedata + '\')'
cursor.execute(q)
conn.commit()
cursor.close()
conn.close()
content = 'saved'
return content
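# Note (added comment, not part of the original module): filter and layout
# settings share the single "viewersetup" table, keyed by (datatype, name);
# because the table has a unique constraint on that pair, "replace into"
# overwrites any previously saved setting of the same name, which keeps the
# save/load/rename/delete handlers above consistent with one another.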
def get_status (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
conn = sqlite3.connect(dbpath)
cursor = conn.cursor()
q = 'select * from info'
cursor.execute(q)
content = {}
for row in cursor.fetchall():
content[row[0]] = row[1]
return content
def get_widgetlist ():
content = []
modules = au.get_local_module_infos_of_type('webviewerwidget')
for module_name in modules:
module = modules[module_name]
conf = module.conf
if 'required_annotator' in conf:
req = conf['required_annotator']
else:
# Removes wg.
req = module_name[2:]
content.append({'name': module_name,
'title': module.title,
'required_annotator': req})
return content
def get_count (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
tab = queries['tab'][0]
if 'filter' in queries:
filterstring = queries['filter'][0]
else:
filterstring = None
cf = CravatFilter(dbpath=dbpath,
mode='sub',
filterstring=filterstring)
n = cf.exec_db(cf.getcount, level=tab)
content = {'n': n}
return content
def get_result (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
tab = queries['tab'][0]
if 'filter' in queries:
filterstring = queries['filter'][0]
else:
filterstring = None
if 'confpath' in queries:
confpath = queries['confpath'][0]
else:
confpath = None
reporter_name = 'jsonreporter'
f, fn, d = imp.find_module(
reporter_name,
[os.path.join(os.path.dirname(__file__), 'webviewer')])
m = imp.load_module(reporter_name, f, fn, d)
args = ['', dbpath]
if confpath != None:
args.extend(['-c', confpath])
if filterstring != None:
args.extend(['--filterstring', filterstring])
reporter = m.Reporter(args)
data = reporter.run(tab=tab)
content = {}
content['stat'] = {'rowsreturned': True,
'wherestr':'',
'filtered': True,
'filteredresultmessage': '',
'maxnorows': 100000,
'norows': data['info']['norows']}
content['columns'] = get_colmodel(tab, data['colinfo'])
content["data"] = get_datamodel(data[tab])
content["status"] = "normal"
return content
def get_result_levels (queries):
conn = sqlite3.connect(queries['dbpath'][0])
cursor = conn.cursor()
sql = 'select name from sqlite_master where type="table" and ' +\
'name like "%_header"'
cursor.execute(sql)
ret = cursor.fetchall()
if len(ret) > 0:
content = [v[0].split('_')[0] for v in ret]
content.insert(0, 'info')
else:
content = []
return content
def get_variant_cols (queries):
dbpath = urllib.parse.unquote(queries['dbpath'][0])
if 'confpath' in queries:
confpath = queries['confpath'][0]
else:
confpath = None
if 'filter' in queries:
filterstring = queries['filter'][0]
else:
filterstring = None
data = {}
data['data'] = {}
data['stat'] = {}
data['status'] = {}
colinfo = get_colinfo(dbpath, confpath, filterstring)
data['columns'] = {}
if 'variant' in colinfo:
data['columns']['variant'] = get_colmodel('variant', colinfo)
if 'gene' in colinfo:
data['columns']['gene'] = get_colmodel('gene', colinfo)
content = data
return content
def get_webviewerconf ():
conf_path = os.path.join(
au.get_system_conf()['home'],
'viewers',
'webviewer',
'webviewer.yml')
with open(conf_path) as f:
conf = yaml.safe_load(f)
return conf
def get_summary_widget_names (queries):
runid = queries['jobid'][0]
def get_datamodel (data):
ret = []
for row in data:
ret.append(list(row))
return ret
def get_colmodel (tab, colinfo):
colModel = []
groupkeys_ordered = []
groupnames = {}
for d in colinfo[tab]['colgroups']:
groupnames[d['name']] = [d['displayname'], d['count']]
groupkeys_ordered.append(d['name'])
dataindx = 0
for groupkey in groupkeys_ordered:
[grouptitle, col_count] = groupnames[groupkey]
columngroupdef = {'title': grouptitle, 'colModel': []}
startidx = dataindx
endidx = startidx + col_count
for d in colinfo[tab]['columns'][startidx:endidx]:
column = {
"col": d['col_name'],
'colgroupkey': groupkey,
'colgroup': grouptitle,
"title": d['col_title'],
"align":"center",
"hidden":False,
"dataIndx": dataindx,
"retfilt":False,
"retfilttype":"None",
"multiseloptions":[]
}
if d['col_type'] == 'string':
column['filter'] | |
from scrapy import Spider, Request
from scrapy.selector import Selector
from webmd.items import WebmdItem
import urllib
import re
headers = {'User-Agent': 'Chrome/56.0.2924.87', 'enc_data': 'OXYIMo2UzzqFUzYszFv4lWP6aDP0r+h4AOC2fYVQIl8=', 'timestamp': 'Thu, 09 Feb 2017 02:11:34 GMT', 'client_id': '3454df96-c7a5-47bb-a74e-890fb3c30a0d'}
class WebmdSpider(Spider):
name = "webmd_spider"
allowed_urls = ['http://www.webmd.com/']
start_urls = ['http://www.webmd.com/drugs/index-drugs.aspx?show=conditions']
def parse(self, response):
# follow links to next alphabet page
atoz = response.xpath('//*[@id="drugs_view"]/li/a/@href').extract()
print("parsing...")
for i in range(2, len(atoz)):
yield Request(response.urljoin(atoz[i]), callback = self.parse_az, dont_filter= True)
def parse_az(self, response):
# follow links to condition
Aa = response.xpath('//*[@id="showAsubNav"]/ul/li').extract()
print("selecting alphabet...")
for i in range(len(Aa)):
yield Request(response.urljoin(response.xpath('//*[@id="showAsubNav"]/ul/li//a/@href').extract()[i]), \
callback = self.parse_condition,\
dont_filter= True)
def parse_condition(self, response):
# follow links to drugs
table = response.xpath('//*[@id="az-box"]/div//a').extract()
print("scraping condition and following link to drugs...")
for i in range(len(table)):
Condition = response.xpath('//*[@id="az-box"]/div//a/text()').extract()[i]
yield Request(response.urljoin(response.xpath('//*[@id="az-box"]/div//a/@href').extract()[i]), \
callback = self.parse_drug, meta = {'Condition' : Condition},\
dont_filter= True)
def parse_drug(self, response):
# following links to drug details
Condition = response.meta['Condition']
print("scraping drug info and following link to details...")
if re.search('Please select a condition below to view a list', response.body):
yield Request(response.urljoin(response.xpath('//*[@id="fdbSearchResults"]/ul/li[1]/a//@href').extract()[0]),\
callback = self.parse_drug, meta = {'Condition': Condition},\
dont_filter= True)
else:
rows = response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr').extract()
for i in range(len(rows)):
Drug = response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr/td[1]/a/text()').extract()[i]
Indication = response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr/td[2]/@class').extract()[i].replace('drug_ind_fmt', '')
Type = response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr/td[3]/@class').extract()[i].replace('drug_type_fmt', '')
Review = response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr/td[4]/a/text()').extract()[i].replace('\r\n', '')
aspx_index = response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr/td[1]/a/@href').extract()[i].find('aspx') + 4
yield Request(response.urljoin(response.xpath('//*[@id="vit_drugsContent"]/div/div/table[2]/tr/td[1]/a//@href').extract()[i][:aspx_index]),\
callback = self.parse_details, meta = {'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type, 'Review': Review},\
dont_filter= True)
def parse_details(self, response):
Condition = response.meta['Condition']
Drug = response.meta['Drug']
Indication = response.meta['Indication']
Type = response.meta['Type']
Review = response.meta['Review']
print("scraping details and following link to contraindications...")
if re.search('The medication you searched for has more', response.body):
yield Request(response.urljoin(response.xpath('//*[@id="ContentPane28"]/div/section/p[1]/a//@href').extract()[0]), \
callback = self.parse_details, meta = {'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type, 'Review': Review},\
dont_filter= True)
else:
Use = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/div/div/div[3]/div[1]/div[1]/h3/preceding-sibling::p//text()').extract())
HowtoUse = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/div/div/div[3]/div[1]/div[1]/h3/following-sibling::p//text()').extract())
Sides = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/div/div/div[3]/div[2]/div/p[1]//text()').extract()).replace('\r\n', '')
Precautions = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/div/div/div[3]/div[3]/div/p//text()').extract())
Interactions = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/div/div/div[3]/div[4]/div[1]/p[2]//text()').extract())
revurl = response.xpath('//*[@id="ContentPane28"]/div/div/div/div[2]/nav/ul/li[7]/a//@href').extract()[0]
if re.search('(rx/)(\d+)',response.xpath('//*[@id="ContentPane28"]/div/div/div/div[4]/div[1]/div/a/@href').extract()[0]):
priceid = re.search('(rx/)(\d+)',response.xpath('//*[@id="ContentPane28"]/div/div/div/div[4]/div[1]/div/a/@href').extract()[0]).group(2)
else:
priceid = ''
if not Use:
Use = ' '
if not Sides:
Sides = ' '
if not Interactions:
Interactions = ' '
if not Precautions:
Precautions = ' '
if not HowtoUse:
HowtoUse = ' '
if re.search('COMMON BRAND NAME', response.body):
BrandName = ', '.join(response.xpath('//*[@id="ContentPane28"]/div/header/section/section[1]/p/a/text()').extract())
GenName = response.xpath('//*[@id="ContentPane28"]/div/header/section/section[2]/p/text()').extract()[0]
if not BrandName:
BrandName = ' '
if not GenName:
GenName = ' '
elif re.search('GENERIC NAME', response.body):
BrandName = ' '
GenName = response.xpath('//*[@id="ContentPane28"]/div/header/section/section[1]/p/text()').extract()[0]
if not GenName:
GenName = ' '
else:
GenName = ' '
BrandName = ' '
yield Request(response.urljoin(response.url + '/list-contraindications'),\
callback = self.parse_avoid, meta = {'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type, 'Review': Review,\
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides,\
'Precautions': Precautions,\
'Interactions': Interactions,\
'BrandName': BrandName,\
'GenName': GenName,\
'revurl': revurl,\
'priceid': priceid},\
dont_filter= True)
def parse_avoid(self, response):
Condition = response.meta['Condition']
Drug = response.meta['Drug']
Indication = response.meta['Indication']
Type = response.meta['Type']
Review = response.meta['Review']
Use = response.meta['Use']
HowtoUse = response.meta['HowtoUse']
Sides = response.meta['Sides']
Precautions = response.meta['Precautions']
Interactions = response.meta['Interactions']
BrandName = response.meta['BrandName']
GenName = response.meta['GenName']
revurl = response.meta['revurl']
priceid = response.meta['priceid']
print("scraping avoid use cases...")
if re.search("We\'re sorry, but we couldn\'t find the page you tried", response.body):
AvoidUse = ' '
Allergies = ' '
elif re.search('Conditions:', response.body):
AvoidUse = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/article/section/p[2]/text()').extract())
Allergies = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/article/section/p[3]/text()').extract())
elif re.search('Allergies:', response.body):
AvoidUse = ' '
Allergies = ' '.join(response.xpath('//*[@id="ContentPane28"]/div/article/section/p[2]/text()').extract())
else:
AvoidUse = ' '
Allergies = ' '
if not AvoidUse:
AvoidUse = ' '
if not Allergies:
Allergies = ' '
yield Request(response.urljoin(revurl), \
callback=self.parse_reviews,
meta={'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type,
'Review': Review, \
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides, \
'Precautions': Precautions, \
'Interactions': Interactions, \
'BrandName': BrandName, \
'GenName': GenName, \
'AvoidUse': AvoidUse,\
'Allergies': Allergies,\
'priceid': priceid}, \
dont_filter=True)
def parse_reviews(self, response):
Condition = response.meta['Condition']
Drug = response.meta['Drug']
Indication = response.meta['Indication']
Type = response.meta['Type']
Review = response.meta['Review']
Use = response.meta['Use']
HowtoUse = response.meta['HowtoUse']
Sides = response.meta['Sides']
Precautions = response.meta['Precautions']
Interactions = response.meta['Interactions']
BrandName = response.meta['BrandName']
GenName = response.meta['GenName']
AvoidUse = response.meta['AvoidUse']
Allergies = response.meta['Allergies']
priceid = response.meta['priceid']
if re.search('Rate this treatment and share your opinion', response.body):
Effectiveness = ' '
EaseofUse = ' '
Satisfaction = ' '
yield Request('http://www.webmd.com/search/2/api/rx/forms/v2/' + priceid, \
method='GET', headers=headers, \
callback=self.parse_prices, \
meta={'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type,
'Review': Review, \
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides, \
'Precautions': Precautions, \
'Interactions': Interactions, \
'BrandName': BrandName, \
'GenName': GenName, \
'AvoidUse': AvoidUse, \
'Allergies': Allergies,
'Effectiveness': Effectiveness, \
'EaseofUse': EaseofUse, \
'Satisfaction': Satisfaction}, \
dont_filter=True)
elif re.search('Be the first to share your experience with this treatment', response.body):
Effectiveness = ' '
EaseofUse = ' '
Satisfaction = ' '
yield Request('http://www.webmd.com/search/2/api/rx/forms/v2/' + priceid, \
method='GET', headers=headers, \
callback=self.parse_prices, \
meta={'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type,
'Review': Review, \
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides, \
'Precautions': Precautions, \
'Interactions': Interactions, \
'BrandName': BrandName, \
'GenName': GenName, \
'AvoidUse': AvoidUse, \
'Allergies': Allergies,
'Effectiveness': Effectiveness, \
'EaseofUse': EaseofUse, \
'Satisfaction': Satisfaction}, \
dont_filter=True)
else:
url = 'http://www.webmd.com/drugs/service/UserRatingService.asmx/GetUserReviewSummary?repositoryId=1&primaryId=' # 6007&secondaryId=-1&secondaryIdValue='
url2 = '&secondaryId=-1&secondaryIdValue='
id = re.search('(drugid=)(\d+)', response.url).group(2)
id2 = urllib.quote(re.sub("\s+", " ", response.xpath('//option[@value = -1]//text()').extract()[0]).strip())
yield Request(url + id + url2 + id2,\
callback= self.parse_ratings, \
meta={'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type,
'Review': Review, \
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides, \
'Precautions': Precautions, \
'Interactions': Interactions, \
'BrandName': BrandName, \
'GenName': GenName, \
'AvoidUse': AvoidUse, \
'Allergies': Allergies, \
'priceid': priceid}, \
dont_filter=True)
def parse_ratings(self, response):
Condition = response.meta['Condition']
Drug = response.meta['Drug']
Indication = response.meta['Indication']
Type = response.meta['Type']
Review = response.meta['Review']
Use = response.meta['Use']
HowtoUse = response.meta['HowtoUse']
Sides = response.meta['Sides']
Precautions = response.meta['Precautions']
Interactions = response.meta['Interactions']
BrandName = response.meta['BrandName']
GenName = response.meta['GenName']
AvoidUse = response.meta['AvoidUse']
Allergies = response.meta['Allergies']
priceid = response.meta['priceid']
if re.search('("xsd:string">)(\d+.\d+)',response.xpath('//*/*').extract()[3]):
Effectiveness = re.search('("xsd:string">)(\d+.\d+)',response.xpath('//*/*').extract()[3]).group(2)
else:
Effectiveness = re.search('("xsd:string">)(\d+)',response.xpath('//*/*').extract()[3]).group(2)
if re.search('("xsd:string">)(\d+.\d+)',response.xpath('//*/*').extract()[4]):
EaseofUse = re.search('("xsd:string">)(\d+.\d+)',response.xpath('//*/*').extract()[4]).group(2)
else:
EaseofUse = re.search('("xsd:string">)(\d+)',response.xpath('//*/*').extract()[4]).group(2)
if re.search('("xsd:string">)(\d+.\d+)',response.xpath('//*/*').extract()[5]):
Satisfaction = re.search('("xsd:string">)(\d+.\d+)',response.xpath('//*/*').extract()[5]).group(2)
else:
Satisfaction = re.search('("xsd:string">)(\d+)',response.xpath('//*/*').extract()[5]).group(2)
if priceid != '':
yield Request('http://www.webmd.com/search/2/api/rx/forms/v2/'+priceid,\
method='GET', headers=headers, \
callback=self.parse_prices, \
meta={'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type,
'Review': Review, \
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides, \
'Precautions': Precautions, \
'Interactions': Interactions, \
'BrandName': BrandName, \
'GenName': GenName, \
'AvoidUse': AvoidUse, \
'Allergies': Allergies,
'Effectiveness': Effectiveness,\
'EaseofUse': EaseofUse,\
'Satisfaction': Satisfaction}, \
dont_filter=True)
else:
strength = ' '
form = ' '
val = ' '
EstimatedPrice = ' '
item = WebmdItem()
item['AvoidUse'] = AvoidUse
item['Allergies'] = Allergies
item['Use'] = Use
item['HowtoUse'] = HowtoUse
item['Precautions'] = Precautions
item['Interactions'] = Interactions
item['Sides'] = Sides
item['Condition'] = Condition
item['Drug'] = Drug
item['Indication'] = Indication
item['Type'] = Type
item['Review'] = Review
item['BrandName'] = BrandName
item['GenName'] = GenName
item['Effectiveness'] = Effectiveness
item['EaseofUse'] = EaseofUse
item['Satisfaction'] = Satisfaction
item['EstimatedPrice'] = EstimatedPrice
item['Dosage'] = strength
item['PkgCount'] = val
item['Form'] = form
yield item
def parse_prices(self, response):
Condition = response.meta['Condition']
Drug = response.meta['Drug']
Indication = response.meta['Indication']
Type = response.meta['Type']
Review = response.meta['Review']
Use = response.meta['Use']
HowtoUse = response.meta['HowtoUse']
Sides = response.meta['Sides']
Precautions = response.meta['Precautions']
Interactions = response.meta['Interactions']
BrandName = response.meta['BrandName']
GenName = response.meta['GenName']
AvoidUse = response.meta['AvoidUse']
Allergies = response.meta['Allergies']
Effectiveness = response.meta['Effectiveness']
EaseofUse = response.meta['EaseofUse']
Satisfaction = response.meta['Satisfaction']
if re.search('("NDC":\[")(\d+)', response.body):
if re.search('("value":)(\d+)', response.body).group(2):
ndc = re.search('("NDC":\[")(\d+)', response.body).group(2)
val = re.search('("value":)(\d+)', response.body).group(2)
if re.search('("form":")(\w+)', response.body):
form = re.search('("form":")(\w+)', response.body).group(2)
else:
form = ' '
if re.search('("strength":")(\d+\s+\w+)', response.body):
strength = re.search('("strength":")(\d+\s+\w+)', response.body).group(2)
else:
strength = ' '
urlp = 'http://www.webmd.com/search/2/api/rx/pricing/ndc/'
urlp2 = '00000?lat=40.7466&lng=-73.9098&rad=5&rollup=true&pgroup='
yield Request(urlp + ndc + '/' + val + '/' + urlp2, \
method='GET',
headers=headers,
callback=self.parse_estprice,
meta={'Condition': Condition, 'Drug': Drug, 'Indication': Indication, 'Type': Type,
'Review': Review, \
'Use': Use, \
'HowtoUse': HowtoUse, \
'Sides': Sides, \
'Precautions': Precautions, \
'Interactions': Interactions, \
'BrandName': BrandName, \
'GenName': GenName, \
'AvoidUse': AvoidUse, \
'Allergies': Allergies,
'Effectiveness': Effectiveness, \
'EaseofUse': EaseofUse, \
'Satisfaction': Satisfaction,\
'strength': strength,\
'val': val,\
'form': form}, \
dont_filter=True)
else:
strength = ' '
form = ' '
val= ' '
EstimatedPrice = ' '
item = WebmdItem()
item['AvoidUse'] = AvoidUse
item['Allergies'] = Allergies
item['Use'] | |
from io import TextIOWrapper
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.template import RequestContext
from django.utils import timezone
from django.views.generic import UpdateView, CreateView, DetailView
from prescriptions.models import Prescription
from .forms import *
from .models import *
def is_patient(user):
"""
Helper function that checks if a user is a patient
:param user: The user to be checked
:return: True if user is a patient
"""
if user:
return user.groups.filter(name='Patient').count() != 0
return False
def is_doctor(user):
"""
Helper function that checks if a user is a doctor
:param user: The user to be checked
:return: True if user is a doctor
"""
if user:
return user.groups.filter(name='Doctor').count() != 0
return False
def is_nurse(user):
"""
Helper function that checks if a user is a nurse
:param user: The user to be checked
:return: True if user is a nurse
"""
if user:
return user.groups.filter(name='Nurse').count() != 0
return False
def is_doctor_or_nurse(user):
"""
Uses above functions combined to fit the @user_passes_test mixin
:param user: The User in question
:return: True if the user is a Doctor or Nurse
"""
return is_doctor(user) or is_nurse(user)
def not_patient(user):
"""
    Uses the is_patient function to test whether the user is of patient type
:param user: The User in question
:return: True if user is not a patient
"""
return not is_patient(user)
def is_admin(user):
"""
Helper function that checks if a user is an admin
:param user: The user to be checked
:return: True if user is an admin
"""
if user:
return user.groups.filter(name='Admin').count() != 0
return False
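# Example (illustrative): these predicates are intended for Django's
# @user_passes_test decorator, mirroring the views defined below, e.g.
#
#   @login_required
#   @user_passes_test(is_admin)
#   def admin_only_view(request):
#       ...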
def user_login(request):
"""
Renders the user login page, and redirects the user to the appropriate landing page
:param request: The request with user information
:return: The page to be rendered
"""
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
# Register Log
log = Log.objects.create_Log(user, user.username, timezone.now(), user.username + " logged in")
log.save()
return HttpResponseRedirect(reverse('landing'))
else:
return HttpResponse("Your Account has been Deactivated")
else:
print("Invalid login: {0}".format(username))
context = RequestContext(request)
context['login_failure'] = True
return render(request, 'core/login.html', context)
else:
return render(request, 'core/login.html', RequestContext(request))
@login_required
def user_logout(request):
"""
Logs out a user, and logs it
:param request: The request with user information
:return: The page to be rendered
"""
# Register Log
log = Log.objects.create_Log(request.user, request.user.username, timezone.now(),
request.user.username + " logged out")
log.save()
logout(request)
return HttpResponseRedirect(reverse('login'))
@login_required
@user_passes_test(is_patient)
def patient_landing(request):
"""
Renders the patient landing page
:param request: The request with user information
:return: The page to be rendered
"""
return render(request, 'core/landing/Patient.html')
@login_required
def profile(request):
"""
Displays the user Profile Information
:param request: The request with user information
:return: The page to be rendered
"""
parent = get_parent(request)
return render(request, 'core/landing/pages/profile.html', {'parent': parent})
def QueryListtoString(query):
"""
Used to convert Query lists to readable strings, used in the following Medical Information Export function.
:param query: the query to convert
:return: the readable string
"""
ans = ""
for q in query.iterator():
ans = ans + str(q) + '\n'
return ans
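# Example (illustrative): for a queryset whose objects render as "Aspirin 100mg"
# and "Ibuprofen 200mg", QueryListtoString returns the string
# "Aspirin 100mg\nIbuprofen 200mg\n" (note the trailing newline).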
def MediInfoExport(Patient_exporting: Patient, assoc_user: User, is_email):
"""
Generic getter for a patient's complete medical information into a readable format in a String
:param Patient_exporting: The Patient exporting their info
:param assoc_user: The Patient's associated User
:param is_email: True if this is being sent in an email (adds greeting), false otherwise
:return: The complete text export
"""
Name = 'Name: ' + str(assoc_user.get_full_name())
Email = 'Email: ' + str(assoc_user.email)
Birthday = 'Birthday: ' + str(Patient_exporting.birthday)
Gender = 'Sex: ' + str(dict(Patient_exporting.SEX_CHOICE)[Patient_exporting.sex])
Blood_Type = 'Blood-Type: ' + str(dict(Patient_exporting.BLOOD_TYPE)[Patient_exporting.blood_type])
Height = 'Height: ' + str(Patient_exporting.height)
Weight = 'Weight: ' + str(Patient_exporting.weight) + ' lbs'
Allergies = 'Allergies: \r\n' + str(Patient_exporting.allergies)
Medical_History = 'Medical-History: \r\n' + str(Patient_exporting.medical_history)
Prescriptions = 'Prescriptions: \r\n' + \
str(QueryListtoString(Prescription.objects.all().filter(patient=Patient_exporting)))
Insurance_Info = 'Insurance-Info: ' + str(Patient_exporting.insurance_info)
Preferred_Hospital = 'Preferred-Hospital: ' + str(Patient_exporting.preferred_hospital)
PHospital = 'Current-Hospital: ' + str(Patient_exporting.hospital)
Emergency_Contact = 'Emergency-Contact: ' + str(Patient_exporting.emergency_contact)
ans = Name + '\r\n' + \
Email + '\r\n' + \
Birthday + '\r\n' + \
Gender + '\r\n' + \
Blood_Type + '\r\n' + \
Height + '\r\n' + \
Weight + '\r\n\r\n' + \
Allergies + '\r\n\r\n' + \
Medical_History + '\r\n\r\n' + \
Prescriptions + '\r\n\r\n' + \
Insurance_Info + '\r\n' + \
Preferred_Hospital + '\r\n' + \
PHospital + '\r\n' + \
Emergency_Contact + '\r\n'
if is_email:
return 'Hello ' + str(assoc_user.first_name) + \
', \n\n\tYou are receiving this email as an export of your medical information from ' + \
str(Patient_exporting.hospital) + '. Below you\'ll find the medical record export. ' \
'Thank you for using HealthNet!\n\n' + ans
return ans
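# Example (illustrative): the email/download views below build the plain-text
# export for the logged-in patient with
#   MediInfoExport(Patient.objects.get(user=request.user), request.user, is_email=False)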
@login_required
@user_passes_test(is_patient)
def email(request):
"""
Sends the patient an email with a full summary of their medical information.
:param request: The request with user information
:return: The success landing page
"""
Pat = Patient.objects.all().get(user=request.user)
    # User.email_user() returns None, so it can not be used as a success test;
    # send the message (failures are silenced by fail_silently) and confirm.
    request.user.email_user('Medical Information Export: ' + request.user.get_full_name(),
                            MediInfoExport(Pat, request.user, True),
                            '<EMAIL>',
                            fail_silently=True)
    return render(request, 'core/landing/pages/email_success.html')
@login_required
@user_passes_test(is_patient)
def download(request):
"""
Serves patients full summary as a downloadable text file.
:param request: The request with user information
:return: Downloadable text file, in lieu of a conventional response
"""
Pat = Patient.objects.all().get(user=request.user)
content = MediInfoExport(Pat, request.user, False)
response = HttpResponse(content, content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename="%s_Info.txt"' % \
str(request.user.get_full_name()).replace(' ', '-')
return response
def listtostring(listin):
"""
Converts a simple list into a space separated sentence, effectively reversing str.split(" ")
:param listin: the list to convert
:return: the readable string
"""
ans = ""
for l in listin:
ans = ans + str(l) + " "
return ans.strip()
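# Example (illustrative): listtostring(['Strong', 'Memorial', 'Hospital']) returns
# "Strong Memorial Hospital"; the importer below uses it to rebuild multi-word
# values such as hospital names from an already split line.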
def read_new_Patient(filename, encoding, doctor_user):
"""
Reads in a new Patient from the specific file, assumes that the patient instance already exists and is associated
with an existing user, but not necessarily populated.
:param doctor_user: User of Doctor signing off on Patient import, used when constructing Prescriptions
:param filename: Name of the file to read from
:param encoding: UTF-8, ANSI, etc etc
:return: The newly populated Patient class (after its been saved)
"""
# print("reading new patient...")
file = TextIOWrapper(filename.file, encoding=encoding)
new_patient = None
Allergies_mode = False
Prescriptions_mode = False
Medical_History_mode = False
Allergies = ''
Medical_History = ''
Prescriptions = []
for line in file.readlines():
print("Line: " + line)
words = line.strip().split(" ")
print(words)
instance_var = words[0]
# print("Current variable is " + instance_var)
if Allergies_mode:
if line.strip() != '':
# print('found allergy: ' + line.strip())
Allergies = Allergies + line.strip()
else:
# print('And that\'s it for allergies')
Allergies_mode = False
new_patient.allergies = Allergies
elif Medical_History_mode:
if line.strip() != '':
# print('found medical history: ' + line.strip())
Medical_History = Medical_History + line.strip()
else:
# print('And that\'s it for medical history')
Medical_History_mode = False
new_patient.medical_history = Medical_History
elif Prescriptions_mode:
if line.strip() != '':
# print('found prescription: ' + line.strip())
Prescriptions.append(line.strip())
else:
# print('And that\'s it for prescriptions')
Prescriptions_mode = False
for p in Prescriptions:
Prescription.fromString(p, new_patient.id, doctor_user)
if instance_var == 'Email:':
Email = words[1]
print("found email: " + Email)
user = User.objects.get(email=Email)
new_patient = Patient.objects.get(user=user)
print(new_patient)
elif instance_var == 'Birthday:':
print("found b-day: " + words[1])
new_patient.birthday = words[1]
elif instance_var == 'Sex:':
print("found sex: " + words[1])
new_patient.sex = words[1]
elif instance_var == 'Blood-Type:':
print("found b-type: " + words[1])
new_patient.blood_type = words[1]
elif instance_var == 'Height:':
print("found height: " + words[1])
new_patient.height = words[1]
elif instance_var == 'Weight:':
print("found weight: " + words[1])
new_patient.weight = words[1]
elif instance_var == 'Allergies:':
print("found Allergies")
Allergies_mode = True
        elif instance_var == 'Medical-History:':
print("found Medical History")
Medical_History_mode = True
elif instance_var == 'Prescriptions:':
print("found prescriptions")
Prescriptions_mode = True
elif instance_var == 'Insurance-Info:':
insurance = listtostring(words[1:])
print("found Insurance: " + insurance)
new_patient.insurance_info = insurance
elif instance_var == 'Preferred-Hospital:':
p_hospital = listtostring(words[1:])
print("found hospital: " + p_hospital)
new_patient.preferred_hospital = Hospital.objects.get(name=p_hospital)
elif instance_var == 'Emergency-Contact:':
print("found e-contact: " + words[1])
new_patient.emergency_contact = words[1]
# elif instance_var == 'Current-Hospital:':
# c_hospital = listtostring(words[1:])
# print("found hospital: " + c_hospital)
# new_patient.hospital = Hospital.objects.get(name=c_hospital)
return new_patient.save()
@login_required
@user_passes_test(is_doctor_or_nurse)
def upload_patient_info(request):
"""
View for uploading a text file | |
at the
| first point through which
| the conic hybConic passes to the inverse of the one of the direction
| used for
| the tangent.
|
|
| passingPtIdx = 1
| passingPtTgtOrient = -1
| hybConic.SetIntermediateTangentDirectionFlag passingPtIdx,
| passingPtTgtOrient
:param int i_index_point:
:param int i_orientation:
:return: None
:rtype: None
"""
return self.hybrid_shape_conic.SetIntermediateTangentDirectionFlag(i_index_point, i_orientation)
def set_start_and_end_tangents_plus_conic_parameter(self, i_start_tgt: HybridShapeDirection,
i_end_tgt: HybridShapeDirection, i_conic_param: float) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetStartAndEndTangentsPlusConicParameter(HybridShapeDirection
| iStartTgt,
| HybridShapeDirection iEndTgt,
| double iConicParam)
|
| Sets the tangent directions at conic start and end points, and the conic
| parameter.
|
| Parameters:
|
| iStartTgt
| The tangent direction at the start point
| iEndTgt
| The tangent direction at the end point
| iConicParam
| The conic parameter
| Legal values: p = 0.5 (parabola), 0<=p<=0.5 (ellipse), 0.5<= p <=1.0 (hyperbola)
|
| Example:
|
| This example sets firstDir and secondDir as the tangent directions at
| the start
| and end points of the conic hybConic, and conicParm as the conic
| parameter.
|
|
| hybConic.SetStartAndEndTangentsPlusConicParameter firstDir, secondDir,
| conicParm
:param HybridShapeDirection i_start_tgt:
:param HybridShapeDirection i_end_tgt:
:param float i_conic_param:
:return: None
:rtype: None
"""
return self.hybrid_shape_conic.SetStartAndEndTangentsPlusConicParameter(i_start_tgt.com_object,
i_end_tgt.com_object, i_conic_param)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'set_start_and_end_tangents_plus_conic_parameter'
# # vba_code = """
# # Public Function set_start_and_end_tangents_plus_conic_parameter(hybrid_shape_conic)
# # Dim iStartTgt (2)
# # hybrid_shape_conic.SetStartAndEndTangentsPlusConicParameter iStartTgt
# # set_start_and_end_tangents_plus_conic_parameter = iStartTgt
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
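    # Hedged usage sketch (object names are illustrative, not part of this module):
    # assuming `conic` wraps an existing HybridShapeConic and `start_dir`/`end_dir`
    # are HybridShapeDirection objects, a parabola through those tangents could be
    # requested with:
    #
    #   conic.set_start_and_end_tangents_plus_conic_parameter(start_dir, end_dir, 0.5)
    #   # followed by the usual part update so CATIA recomputes the feature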
def set_start_and_end_tangents_plus_passing_point(self, i_start_tgt: HybridShapeDirection,
i_end_tgt: HybridShapeDirection, i_passing_pt: Reference) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetStartAndEndTangentsPlusPassingPoint(HybridShapeDirection
| iStartTgt,
| HybridShapeDirection iEndTgt,
| Reference iPassingPt)
|
| Sets the tangent directions at conic start and end points, and a passing
| point.
|
| Parameters:
|
| iStartTgt
| The tangent direction at the start point.
| Sub-element(s) supported (see
|
| Boundary object): Vertex.
| iEndTgt
| The tangent direction at the end point
| iPassingPt
| A point through which the conic must pass.
| Legal values: This point must differ from the start and end points.
|
| Example:
|
| This example sets firstDir and secondDir as the tangent directions at
| the start
| and end points of the conic hybConic, and passingPoint as a point
| through
| which the conic must pass.
|
|
| hybConic.SetStartAndEndTangentsPlusPassingPoint firstDir, secondDir,
| passingPoint
:param HybridShapeDirection i_start_tgt:
:param HybridShapeDirection i_end_tgt:
:param Reference i_passing_pt:
:return: None
:rtype: None
"""
return self.hybrid_shape_conic.SetStartAndEndTangentsPlusPassingPoint(i_start_tgt.com_object,
i_end_tgt.com_object,
i_passing_pt.com_object)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'set_start_and_end_tangents_plus_passing_point'
# # vba_code = """
# # Public Function set_start_and_end_tangents_plus_passing_point(hybrid_shape_conic)
# # Dim iStartTgt (2)
# # hybrid_shape_conic.SetStartAndEndTangentsPlusPassingPoint iStartTgt
# # set_start_and_end_tangents_plus_passing_point = iStartTgt
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def set_start_tangent_direction_flag(self, i_orientation: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetStartTangentDirectionFlag(long iOrientation)
|
| Sets the tangent direction orientation at the conic start
| point.
|
| Parameters:
|
| iOrientation
| The direction orientation to be applied to the tangent direction at
| the conic start point
| Legal values: 1 if the tangent direction is to be used as is, and
| -1 if it must be inverted
|
| Example:
|
| This example sets the direction orientation of the tangent at the
| start point of
| the conic hybConic to the inverse of the one of the direction used
| for
| the tangent.
|
|
| startPtTgtOrient = -1
| hybConic.SetStartTangentDirectionFlag
| startPtTgtOrient
:param int i_orientation:
:return: None
:rtype: None
"""
return self.hybrid_shape_conic.SetStartTangentDirectionFlag(i_orientation)
def set_tangent_intersect_point_plus_conic_parm(self, i_tgt_int: Reference, i_conic_param: float) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetTangentIntersectPointPlusConicParm(Reference
| iTgtInt,
| double iConicParam)
|
| Sets the intersection point of the conic tangents to the start and end
| points, and the conic parameter.
|
| Parameters:
|
| iTgtInt
| The point intersection of the conic tangents to the start and end
| point.
| Sub-element(s) supported (see
|
| Boundary object): Vertex.
| iConicParam
| The conic parameter
| Legal values: p = 0.5 (parabola), 0<=p<=0.5 (ellipse), 0.5<= p <=1.0 (hyperbola)
| Example:
|
| This example sets tgtIntPoint as the intersection point of the
| tangents
| to the start and end points of the conic hybConic, and conicParm as
| the conic parameter.
|
|
| hybConic.SetTangentIntersectPointPlusConicParm tgtIntPoint,
| conicParm
:param Reference i_tgt_int:
:param float i_conic_param:
:return: None
:rtype: None
"""
return self.hybrid_shape_conic.SetTangentIntersectPointPlusConicParm(i_tgt_int.com_object, i_conic_param)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'set_tangent_intersect_point_plus_conic_parm'
# # vba_code = """
# # Public Function set_tangent_intersect_point_plus_conic_parm(hybrid_shape_conic)
# # Dim iTgtInt (2)
# # hybrid_shape_conic.SetTangentIntersectPointPlusConicParm iTgtInt
# # set_tangent_intersect_point_plus_conic_parm = iTgtInt
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def set_tangent_intersect_point_plus_passing_point(self, i_tgt_int: Reference, i_passing_pt: Reference) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetTangentIntersectPointPlusPassingPoint(Reference
| iTgtInt,
| Reference iPassingPt)
|
| Sets the intersection point of the conic tangents to the start and end
| points, and a passing point.
|
| Parameters:
|
| iTgtInt
| The point intersection of the conic tangents to the start and end
| point.
| Sub-element(s) supported (see
|
| Boundary object): Vertex.
| iPassingPt
| A point through which the conic must pass.
| Legal values: This point must differ from the start and end
| points.
| Sub-element(s) supported (see Boundary object):
| Vertex.
| Example:
|
| This example sets tgtIntPoint as the intersection point of the
| tangents
| to the start and end points of the conic hybConic, and passingPoint as
| a point through
| which the conic must pass.
|
|
| hybConic.SetTangentIntersectPointPlusPassingPoint tgtIntPoint,
| passingPoint
:param Reference i_tgt_int:
:param Reference i_passing_pt:
:return: None
:rtype: None
"""
return self.hybrid_shape_conic.SetTangentIntersectPointPlusPassingPoint(i_tgt_int.com_object,
i_passing_pt.com_object)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove | |
#
# _/_/ _/_/_/ _/_/_/_/_/ _/_/ _/_/_/ _/ _/ _/_/_/
# _/ _/ _/ _/ _/ _/ _/ _/ _/ _/ _/
# _/ _/ _/ _/ _/ _/ _/_/_/ _/ _/ _/_/
# _/ _/ _/ _/ _/ _/ _/ _/ _/ _/
# _/_/ _/_/_/ _/ _/_/ _/ _/_/ _/_/_/
#
#
# __ _____ __ _____ __ _ __
# / / __ __ / ___/__ _______ / / ___ ___ / ___/__ / /__ (_) /_____ ____
# / _ \/ // / / (_ / -_) __(_-</ _ \/ _ \/ _ \ / /__/ -_) / _ \/ / '_/ -_) __/
# /_.__/\_, / \___/\__/_/ /___/_//_/\___/_//_/ \___/\__/_/_//_/_/_/\_\\__/_/
# /___/
from tqdm import tqdm
from sklearn.metrics import confusion_matrix, accuracy_score, roc_curve, auc
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import f1_score
from sklearn.model_selection import StratifiedKFold
from .misc import mem_measure, timer
from sklearn.linear_model import LassoCV
from IPython.display import set_matplotlib_formats
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import time
import pandas as pd
import numpy as np
import lightgbm as lgb
import tracemalloc
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
warnings.simplefilter(action="ignore", category=FutureWarning)
# ML visualizations
def plot_imp(
clf, X, title, model="lgbm", num=30, importaince_type="gain", save_path=None
):
    # Feature importance plot for LightGBM, CatBoost and other boosters exposing
    # feature_importance(); plots the top num features and returns the full importance DataFrame
#sns.set_style("whitegrid")
gcbest = ["#3498db", "#2ecc71"]
sns.set_context("paper", font_scale=1)
sns.set_palette(gcbest)
if model == "catboost":
feature_imp = pd.DataFrame(
{"Value": clf.get_feature_importance(), "Feature": X.columns}
)
elif model == "lgbm":
feature_imp = pd.DataFrame(
{
"Value": clf.feature_importance(importance_type=importaince_type),
"Feature": X.columns,
}
)
else:
feature_imp = pd.DataFrame(
{"Value": clf.feature_importance(), "Feature": X.columns}
)
plt.figure(figsize=(11, num / 2.2))
sns.set(font_scale=0.85)
sns.barplot(
color="#3498db",
x="Value",
y="Feature",
data=feature_imp.sort_values(by="Value", ascending=False)[0:num],
)
plt.title(title)
plt.tight_layout()
if save_path:
plt.savefig(save_path, dpi=100)
plt.show()
return feature_imp
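# Example call (illustrative; `booster` is a trained lightgbm Booster and X_train the
# training frame). The returned DataFrame can be reused, e.g. for feature selection:
#
#   imp = plot_imp(booster, X_train, title="LGBM feature importance",
#                  model="lgbm", num=20, importaince_type="gain")
#   top_features = imp.sort_values("Value", ascending=False)["Feature"].head(20)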
def confusion_matrix_plot(y_test, y_predict, save_path=None):
# Confusion Matrix plot, binary classification including both normalized and absolute values plots
set_matplotlib_formats('svg')
plt.figure()
cm = confusion_matrix(y_test, y_predict)
cmn = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis] * 100
sns.set(font_scale=0.85)
labels = ["0", "1"]
plt.figure(figsize=(6, 5))
# sns.heatmap(cm, xticklabels = labels, yticklabels = labels, annot = True, fmt='d', cmap="Blues", vmin = 0.2);
sns.heatmap(
cmn, xticklabels=labels, yticklabels=labels, annot=True, fmt=".2f", cmap="Blues"
)
plt.title("Confusion Matrix")
plt.ylabel("True Class")
plt.xlabel("Predicted Class")
plt.show()
plt.figure()
cm = confusion_matrix(y_test, y_predict)
cmn = cm.astype("int")
sns.set(font_scale=0.85)
labels = ["0", "1"]
plt.figure(figsize=(6, 5))
# sns.heatmap(cm, xticklabels = labels, yticklabels = labels, annot = True, fmt='d', cmap="Blues", vmin = 0.2);
sns.heatmap(
cmn, xticklabels=labels, yticklabels=labels, annot=True, fmt="d", cmap="Blues"
)
plt.title("Confusion Matrix")
plt.ylabel("True Class")
plt.xlabel("Predicted Class")
if save_path:
plt.savefig(save_path, dpi=100)
plt.show()
def target_corr(X, y, df_cols, save_path=None):
# Feature correlations to the target
    # catCols = X.select_dtypes("object").columns
catCols = X[df_cols].select_dtypes(include=["category", object]).columns
# print (catCols)
X = X[df_cols].drop(columns=catCols)
# reg = LassoCV(n_alphas=30, eps=1e-3, max_iter=20, precompute=False)
reg = LassoCV()
reg.fit(X.fillna(-1), y)
sns.set_style("whitegrid")
# print("Best alpha using built-in LassoCV: %f" % reg.alpha_)
print("Best score using built-in LassoCV: %f" % reg.score(X.fillna(-1), y))
coef = pd.Series(reg.coef_, index=X.columns)
imp_coef = coef.sort_values()
size = len(X.columns) / 1.6
plt.rcParams["figure.figsize"] = (10, size)
imp_coef.plot(kind="barh", color="#3498db")
plt.title("Features correlations to target")
if save_path:
plt.savefig(save_path)
def label_dist(df, label, y=None):
# Target distribution analysis
gcbest = ["#3498db", "#2ecc71"]
sns.set_context("paper", font_scale=1)
sns.set_palette(gcbest)
fig, ax = plt.subplots(1, 2)
plt.figure(figsize=(2, 3))
#sns.set_style("whitegrid")
set_matplotlib_formats('svg')
#plt.style.use("seaborn-notebook")
#sns.set_context("paper", font_scale=1.3)
sns.set_context(font_scale=1)
if y is not None:
splot = sns.countplot("label", data=y.to_frame("label").reset_index(), ax=ax[0])
for p in splot.patches:
splot.annotate(
format(p.get_height(), ".0f"),
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="center",
xytext=(0, 10),
textcoords="offset points",
)
y.value_counts().plot.pie(explode=[0, 0.2], autopct="%1.2f%%", ax=ax[1])
else:
splot = sns.countplot(label, data=df, ax=ax[0])
for p in splot.patches:
splot.annotate(
format(p.get_height(), ".0f"),
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="center",
xytext=(0, 10),
textcoords="offset points",
)
df[label].value_counts().plot.pie(explode=[0, 0.2], autopct="%1.2f%%", ax=ax[1])
fig.show()
def roc_curve_plot(y_test, predictions, save_path=None):
# Roc curve visualization, binary classification including AUC calculation
set_matplotlib_formats('svg')
gcbest = ["#3498db", "#2ecc71"]
sns.set_palette(gcbest)
sns.set_context("paper", font_scale=1)
#plt.style.use('classic')
sns.set_style("whitegrid")
rf_roc_auc = roc_auc_score(y_test, predictions)
rf_fpr, rf_tpr, rf_thresholds = roc_curve(y_test, predictions)
plt.figure(figsize=(6, 5.5))
plt.plot(rf_fpr, rf_tpr, label="AUC = %0.3f" % rf_roc_auc, color="#3498db")
plt.plot([0, 1], [0, 1], "r--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate", fontsize=11)
plt.ylabel("True Positive Rate", fontsize=11)
plt.title("Receiver operating characteristic", fontsize=12)
plt.legend(loc="lower right")
if save_path:
plt.savefig(save_path, dpi=100)
plt.show()
def hist_target(df, feature, target):
# histogram with an hue of the target class
set_matplotlib_formats('svg')
gcbest = ["#3498db", "#2ecc71"]
sns.set_context("paper", font_scale=1)
sns.set_palette(gcbest)
sns.displot(
data=df,
bins=25,
kind="hist",
x=feature,
hue=target,
multiple="stack",
height=3.2,
aspect=1.6,
)
def target_pie(df, target):
# pie chart of the target class distribution
sns.set_style("whitegrid")
gcbest = ["#3498db", "#2ecc71"]
sns.set_palette(gcbest)
#plt.style.use("fivethirtyeight")
plt.figure(figsize=(4, 3))
#sns.set_context("paper", font_scale=1)
df[target].value_counts().plot.pie(explode=[0, 0.2], autopct="%1.2f%%")
def cv_plot(
arr_f1_weighted,
arr_f1_macro,
arr_f1_positive,
AxisName,
mode="full",
save_path=None,
):
# Visualization of the CV folds, F1 macro and F1 positive class
set_matplotlib_formats('svg')
sns.set_context(font_scale=1)
gcbest = ["#3498db", "#2ecc71"]
sns.set_palette(gcbest)
#plt.style.use("fivethirtyeight")
#sns.set_context("paper", font_scale=1)
sns.set_style("whitegrid")
if mode == "fast":
plt.figure(figsize=(5, 6))
else:
plt.figure(figsize=(10,5.58))
index = np.arange(len(arr_f1_weighted))
bar_width = 0.30
opacity = 0.8
plt.bar(
index - bar_width / 2, arr_f1_macro, bar_width, alpha=opacity, label="F1 Macro"
)
plt.bar(
index + bar_width / 2,
arr_f1_positive,
bar_width,
alpha=opacity,
label="F1 Positive",
)
plt.xticks(np.arange(len(arr_f1_weighted)), fontsize=10)
#plt.yticks([0,0.2,0.4,0.6,0.8,1],fontsize=13)
plt.yticks(fontsize=10)
plt.ylabel("F1", fontsize=10)
plt.xlabel("Folds", fontsize=10)
plt.title("%s, 5-Folds Cross Validation" % AxisName[0 : len(AxisName)], fontsize=13)
plt.legend(["F1 macro", "F1 positive"], loc="upper right", fontsize=10)
plt.grid(True)
if save_path:
plt.savefig(save_path)
def preds_distribution(
y_true,
y_pred,
bins=100,
title="Predictions Distribution",
normalize=False,
ax=None,
title_fontsize="medium",
max_y=None,
save_path=None,
):
set_matplotlib_formats('svg')
sns.set_style("whitegrid")
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(9, 4.8))
predictions_proba = np.array(y_pred)
y_bool = np.array(y_true) > 0
y_pred_true = predictions_proba[y_bool]
y_pred_false = predictions_proba[~y_bool]
# print (y_pred_true)
# matplotlib normalize is using the bin width, just calculate it by our own...
weights_false = (
np.ones(len(y_pred_false)) / len(y_pred_false) if normalize else None
)
weights_true = np.ones(len(y_pred_true)) / len(y_pred_true) if normalize else None
ax.hist(
y_pred_false,
bins=bins,
color="r",
alpha=0.5,
label="negative",
weights=weights_false,
)
ax.hist(
y_pred_true,
bins=bins,
color="g",
alpha=0.5,
label="positive",
weights=weights_true,
)
ax.set_title(title, fontsize=title_fontsize)
# _set_lim(max_y, ax.set_ylim)
ax.set_ylim(0, max_y)
ax.legend(loc="best")
if save_path:
plt.savefig(save_path, dpi=100)
plt.show()
return ax
def target_shape(df, target):
fig, ax = plt.subplots(1, 2)
set_matplotlib_formats('svg')
plt.style.use("fivethirtyeight")
plt.figure(figsize=(2.5, 3.5))
sns.set_context("paper", font_scale=1.2)
splot = sns.countplot(target, data=df, ax=ax[0])
for p in splot.patches:
splot.annotate(
format(p.get_height(), ".0f"),
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="center",
xytext=(0, 10),
textcoords="offset points",
)
df[target].value_counts().plot.pie(explode=[0, 0.2], autopct="%1.2f%%", ax=ax[1])
fig.show()
def lgbm(X_train, y_train, X_test, y_test, num, params=None):
# Training function for LGBM with basic categorical features treatment and close to default params
categorical_features = []
for c in X_train.columns:
col_type = X_train[c].dtype
if col_type == "object" or col_type.name == "category":
# an option in case the data(pandas dataframe) isn't passed with the categorical column type
# X[c] = X[c].astype('category')
categorical_features.append(c)
for c in X_test.columns:
col_type = X_test[c].dtype
if col_type == "object" or col_type.name == "category":
# an option in case the data(pandas dataframe) isn't passed with the categorical column type
# X[c] = X[c].astype('category')
categorical_features.append(c)
lgb_train = lgb.Dataset(X_train, y_train, categorical_feature=categorical_features)
lgb_valid = lgb.Dataset(X_test, y_test, categorical_feature=categorical_features)
    if params is None:
params = {
"objective": "binary",
"boosting": "gbdt",
"scale_pos_weight": 0.02,
"learning_rate": 0.005,
"seed": 100
# 'categorical_feature': 'auto',
# 'metric': 'auc',
# 'scale_pos_weight':0.1,
# 'learning_rate': 0.02,
# 'num_boost_round':2000,
# "min_sum_hessian_in_leaf":1,
# 'max_depth' : 100,
# "bagging_freq": 2,
# "num_leaves":31,
# "bagging_fraction" : 0.4,
# "feature_fraction" : 0.05,
}
clf = lgb.train(
params, lgb_train, valid_sets=[lgb_train, lgb_valid], num_boost_round=num
)
return clf
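# Example call (illustrative; assumes X and y are pandas objects with categorical
# columns already typed as 'category'): train for 500 rounds with the defaults above.
#
#   X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, stratify=y)
#   booster = lgbm(X_tr, y_tr, X_te, y_te, num=500)
#   probs = booster.predict(X_te)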
def adjusted_classes(y_scores, t):
# transformation from prediction probabolity to class given the threshold
return [1 if y >= t else 0 for y in y_scores]
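# Example: adjusted_classes([0.2, 0.7, 0.55], 0.5) returns [0, 1, 1].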
@timer
@mem_measure
def cv_adv(
X,
y,
threshold,
iterations,
shuffle=True,
params=None,
mode="classification",
method="full",
):
# Cross Validation - stratified with and without shuffeling
print(
"--------------------------- Running Cross-Validation - "
+ mode
+ ", mode: "
+ method
+ " ---------------------------"
)
print("-> Starting 5-folds CV - Shuffle: " + str(shuffle))
arr_f1_weighted = np.array([])
arr_f1_macro = np.array([])
arr_f1_positive = np.array([])
arr_recall = np.array([])
arr_precision = np.array([])
prediction_folds = []
preds_folds = []
y_folds = []
stacked_models = []
index_column = []
if mode == "regression":
skf = KFold(n_splits=5)
else:
if shuffle == False:
skf = StratifiedKFold(n_splits=5, shuffle=shuffle)
else:
skf = StratifiedKFold(n_splits=5, random_state=2, shuffle=shuffle)
for train_index, test_index in tqdm(skf.split(X, y)):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
clf = lgbm(X_train, y_train, X_test, y_test, iterations, params)
preds = clf.predict(X_test)
predictions = []
predictions = adjusted_classes(preds, threshold)
stacked_models.append(clf)
index_column.extend(X_test.index.values.tolist())
""" Multiclass
predictions = clf.predict(X_test)
predictions_classes = []
for i in predictions:
print (np.argmax(i))
predictions_classes.append(np.argmax(i))
"""
if mode == | |
dict((x.residue_number, x.residue_class) for x in self.shx.residues.all_residues)
class RESI():
def __init__(self, shx, spline: list):
"""
RESI class[ ] number[0] alias
"""
self.shx = shx
self.residue_class = ''
self.residue_number = 0
self.alias = None
self.chain_id = None
self.textline = ' '.join(spline)
if len(spline) < 2 and DEBUG:
print('*** Wrong RESI definition found! Check your RESI instructions ***')
raise ParseParamError
self.get_resi_definition(spline)
if self.residue_number < -999 or self.residue_number > 9999:
print('*** Invalid residue number given. ****')
raise ParseSyntaxError
def get_resi_definition(self, resi: list) -> tuple:
"""
RESI class[ ] number[0] alias
Returns the residue number and class of a string like 'RESI TOL 1'
or 'RESI 1 TOL'
Residue names may now begin with a digit.
They must however contain at least one letter
Allowed residue numbers is now from -999 to 9999 (2017/1)
"""
alpha = re.compile('[a-zA-Z]')
for x in resi:
if alpha.search(x):
if ':' in x:
# contains ":" thus must be a chain-id+number
self.chain_id, self.residue_number = x.split(':')[0], int(x.split(':')[1])
else:
# contains letters, must be a name (class)
self.residue_class = x
else:
# everything else can only be a number
if self.residue_number > 0:
self.alias = int(x)
else:
try:
self.residue_number = int(x)
except ValueError:
self.residue_number = 0
return self.residue_class, self.residue_number, self.chain_id, self.alias
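    # Example (illustrative): 'RESI TOL 1'.split() yields residue_class='TOL' and
    # residue_number=1; 'RESI B:12 CCl3'.split() additionally sets chain_id='B'
    # with residue_number=12; a second bare number ('RESI TOL 1 2') is stored as
    # the alias.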
def _parse_line(self, spline, intnums=False):
"""
:param spline: Splitted shelxl line
:param intnums: if numerical parameters should be integer
:return: numerical parameters and words
"""
if '_' in spline[0]:
self.card_name, suffix = spline[0].upper().split('_')
if any([x.isalpha() for x in suffix]):
self.residue_class = suffix
else:
# TODO: implement _+, _- and _*
self.residue_number = int(suffix)
else:
self.card_name = spline[0].upper()
numparams = []
words = []
for x in spline[1:]: # all values after SHELX card
if str.isdigit(x[0]) or x[0] in '+-':
if intnums:
numparams.append(int(x))
else:
numparams.append(float(x))
else:
words.append(x)
return numparams, words
@property
def index(self):
return self.shx.index_of(self)
def __iter__(self):
for x in self.__repr__().split():
yield x
def split(self):
return self.textline.split()
def __str__(self):
return self.textline
def __repr__(self):
return self.textline
def __bool__(self):
if self.residue_number > 0:
return True
else:
return False
class PART(Command):
def __init__(self, shx, spline: list):
"""
PART n sof
"""
super(PART, self).__init__(shx, spline)
p, _ = self._parse_line(spline)
self.sof = 11.0
self.n = 0
try:
self.n = int(p[0])
except(ValueError, IndexError):
if DEBUG:
print('*** Wrong PART definition in line {} found! '
'Check your PART instructions ***'.format(shx.error_line_num))
raise
self.n = 0
if len(p) > 1:
self.sof = float(p[1])
def __bool__(self):
if self.n > 0:
return True
else:
return False
class XNPD(Command):
def __init__(self, shx, spline: list):
"""
XNPD Umin[-0.001]
"""
super(XNPD, self).__init__(shx, spline)
p, _ = self._parse_line(spline)
self.Umin = -0.001
if len(p) > 0:
self.Umin = p[0]
class SIZE(Command):
def __init__(self, shx, spline: list):
"""
SIZE dx dy dz
"""
super(SIZE, self).__init__(shx, spline)
self.dx, self.dy, self.dz = None, None, None
p, _ = self._parse_line(spline)
if len(p) > 0:
self.dx = p[0]
if len(p) > 1:
self.dy = p[1]
if len(p) > 2:
self.dz = p[2]
def _as_text(self):
if all([self.dx, self.dy, self.dz]):
return "SIZE {:,g} {:,g} {:,g}".format(self.dx, self.dy, self.dz)
else:
return ""
def __repr__(self):
return self._as_text()
def __str__(self):
return self._as_text()
class SHEL(Command):
def __init__(self, shx, spline: list):
"""
SHEL lowres[infinite] highres[0]
"""
super(SHEL, self).__init__(shx, spline)
params, _ = self._parse_line(spline)
if len(params) > 0:
self.lowres = params[0]
if len(params) > 1:
self.highres = params[1]
class WIGL(Command):
def __init__(self, shx, spline: list):
"""
WIGL del[0.2] dU[0.2]
"""
super(WIGL, self).__init__(shx, spline)
p, _ = self._parse_line(spline)
self.d = 0.2
self.dU = 0.2
if len(p) > 0:
self.d = p[0]
if len(p) > 1:
self.dU = p[1]
class WPDB(Command):
def __init__(self, shx, spline: list):
"""
WPDB n[1]
"""
super(WPDB, self).__init__(shx, spline)
p, _ = self._parse_line(spline)
self.n = 1
if len(p) > 0:
self.n = p[0]
class SPEC(Command):
def __init__(self, shx, spline: list):
"""
SPEC d[0.2]
"""
super(SPEC, self).__init__(shx, spline)
p, _ = self._parse_line(spline)
if len(p) > 0:
self.d = p[0]
class STIR(Command):
def __init__(self, shx, spline: list):
"""
STIR sres step[0.01]
"""
super(STIR, self).__init__(shx, spline)
p, _ = self._parse_line(spline)
        self.step = 0.01
        self.sres = None
        if len(p) > 0:
            self.sres = p[0]
if len(p) > 1:
self.step = p[1]
def __repr__(self):
return "STIR {} {}".format(self.sres if self.sres else '', self.step)
def __str__(self):
return "STIR {} {}".format(self.sres if self.sres else '', self.step)
class TWST(Command):
def __init__(self, shx, spline: list):
"""
TWST N[0] (N[1] after SHELXL-2018/3)
Twin component number to be used for the completeness and Friedel completeness statistics.
"""
super(TWST, self).__init__(shx, spline)
p, _ = self._parse_line(spline)
self.N = 1
if len(p) > 0:
self.N = p[0]
class RTAB(Command):
def __init__(self, shx, spline: list):
"""
RTAB codename atomnames
"""
super(RTAB, self).__init__(shx, spline)
self.code = spline.pop(1)
_, self.atoms = self._parse_line(spline)
class PRIG(Command):
def __init__(self, shx, spline: list):
"""
PRIG p[#]
"""
super(PRIG, self).__init__(shx, spline)
params, _ = self._parse_line(spline)
if len(params) > 0:
self.p = params[0]
class PLAN(Command):
def __init__(self, shx, spline: list):
"""
PLAN npeaks[20] d1[#] d2[#]
"""
super(PLAN, self).__init__(shx, spline)
params, _ = self._parse_line(spline)
if len(params) > 0:
self.npeaks = params[0]
if len(params) > 1:
self.d1 = params[1]
if len(params) > 2:
self.d2 = params[2]
class FRAG(Command):
def __init__(self, shx, spline: list):
"""
FRAG code[17] a[1] b[1] c[1] α[90] β[90] γ[90]
"""
super(FRAG, self).__init__(shx, spline)
params, _ = self._parse_line(spline)
self.code = params[0]
self.cell = params[1:7]
class FREE(Command):
def __init__(self, shx, spline: list):
"""
FREE atom1 atom2
"""
super(FREE, self).__init__(shx, spline)
_, atoms = self._parse_line(spline)
try:
self.atom1 = atoms[0]
self.atom2 = atoms[1]
except IndexError:
raise ParseParamError
class FMAP(Command):
"""
FMAP code[2] axis[#] nl[53]
"""
def __init__(self, shx, spline: list):
super(FMAP, self).__init__(shx, spline)
params, _ = self._parse_line(spline)
if len(params) > 0:
self.code = params[0]
if len(params) > 1:
self.axis = params[1]
if len(params) > 2:
            self.nl = params[2]
class MOVE(Command):
def __init__(self, shx, spline: list):
"""
MOVE dx[0] dy[0] dz[0] sign[1]
"""
super(MOVE, self).__init__(shx, spline)
params, _ = self._parse_line(spline)
if len(params) > 2:
self.dxdydz = params[:3]
if len(params) > 3:
self.sign = params[3]
class MERG(Command):
def __init__(self, shx, spline: list):
"""
MERG n[2]
"""
super(MERG, self).__init__(shx, spline)
self.n = None
_n, _ = self._parse_line(spline)
if len(_n) > 0:
self.n = _n[0]
class HTAB(Command):
def __init__(self, shx, spline: list):
"""
HTAB dh[2.0]
HTAB donor-atom acceptor-atom
"""
super(HTAB, self).__init__(shx, spline)
self.dh = None
dh, atoms = self._parse_line(spline)
if dh:
self.dh = dh[0]
if len(atoms) == 2:
self.donor = atoms[0]
self.acceptor = atoms[1]
class GRID(Command):
def __init__(self, shx, spline: list):
"""
GRID sl[#] sa[#] sd[#] dl[#] da[#] dd[#]
"""
super(GRID, self).__init__(shx, spline)
params, _ = self._parse_line(spline)
if len(params) > 0:
self.sl = params[0]
if len(params) > 1:
self.sa = params[1]
if len(params) > 2:
self.sd = params[2]
if len(params) > 3:
self.dl = params[3]
if len(params) > 4:
self.da = params[4]
if len(params) > 5:
self.dd = params[5]
class ACTA(Command):
def __init__(self, shx, spline: list):
"""
ACTA 2θfull[#]
"""
super(ACTA, self).__init__(shx, spline)
self.twotheta, _ = self._parse_line(spline)
self.shx = shx
def _as_str(self):
if self.twotheta:
return "ACTA {:,g}".format(self.twotheta[0])
else:
return "ACTA"
def __repr__(self):
return self._as_str()
def __str__(self):
return self._as_str()
class BLOC(Command):
def __init__(self, shx, spline: list):
"""
BLOC n1 n2 atomnames
"""
super(BLOC, self).__init__(shx, spline)
params, self.atoms = self._parse_line(spline)
if len(params) > 1:
self.n2 = params[1]
if len(params) > 0:
self.n1 = params[0]
self.shx = shx
class FVAR():
def __init__(self, number: int = 1, value: float = 0.0):
"""
FVAR osf[1] free variables
"""
self.fvar_value = value # value
self.number = number # occurence inside of FVAR instructions
self.usage = 1
def __str__(self):
return str(float(self.fvar_value))
def __repr__(self):
return str(float(self.fvar_value))
class FVARs():
def __init__(self, shx):
super(FVARs, self).__init__()
self.fvars = [] # free variables
self.shx = shx
self._fvarline = 0
def __iter__(self):
"""
Must be defined for __repr__() to work.
"""
for x in self.fvars:
yield x
def __getitem__(self, item: int) -> str:
# SHELXL counts fvars from 1 to x:
item = abs(item) - 1
return self.fvars[item].fvar_value
def __setitem__(self, key, fvar_value):
self.fvars[key] = fvar_value
def __len__(self) -> int:
return len(self.fvars)
def __str__(self) -> str:
        # returns FVAR as a list of FVAR instructions with seven numbers per line
lines = chunks(self.as_stringlist, 7)
fvars = [' '.join(i) for i in lines]
fvars = ['FVAR ' + i for i in fvars]
| |
'Ↄ', '&conbase;': 'ↄ',
'&denl;': '\ueee3', '&dscap;': 'ᴅ', 'đ': 'đ', 'Đ': 'Đ',
'&dovlmed;': '\ue491', '&dtailstrok;': 'ꝱ', '&dtail;': 'ɖ',
'&dscapdot;': '\uebd2', '&ddotbl;': 'ḍ', '&Ddotbl;': 'Ḍ',
'&dscapdotbl;': '\uef26', '&ddot;': 'ḋ', '&Ddot;': 'Ḋ',
'&dacute;': '\ue477', '&Dacute;': '\ue077', 'ð': 'ð',
'Ð': 'Ð', 'ðenl;': '\ueee5', 'ðscap;': 'ᴆ',
'ðdotbl;': '\ue48f', 'Ðdotbl;': '\ue08f',
'&Dovlhigh;': '\uf7b6', '&drotdrotlig;': '\ueec6', '&Drot;': 'Ꝺ',
'&drot;': 'ꝺ', '&drotdot;': '\uebd1', '&drotacute;': '\uebb2',
'&drotenl;': '\ueee4', '&dscript;': 'ẟ', '&dcurl;': '\uf193',
'&eenl;': '\ueee6', '&escap;': 'ᴇ', 'ę': 'ę', 'Ę': 'Ę',
'&ecurl;': '\ue4e9', '&Ecurl;': '\ue0e9', '&eogoncurl;': '\uebf3',
'&Eogoncurl;': '\uebf2', '&edotbl;': 'ẹ', '&Edotbl;': 'Ẹ',
'&eogondot;': '\ue4eb', '&Eogondot;': '\ue0eb',
'&eogondotbl;': '\ue4e8', '&Eogondotbl;': '\ue0e8',
'&eogonenl;': '\ueaf3', 'ė': 'ė', 'Ė': 'Ė', 'ë': 'ë',
'Ë': 'Ë', 'ëmacr;': '\ue4cd', 'é': 'é',
'É': 'É', '&eogonacute;': '\ue499', '&Eogonacute;': '\ue099',
'&edotblacute;': '\ue498', '&edblac;': '\ue4d1', '&Edblac;': '\ue0d1',
'&edotacute;': '\ue4c8', '&Edotacute;': '\ue0c8',
'&eogondotacute;': '\ue4ec', '&Eogondotacute;': '\ue0ec',
'&eogondblac;': '\ue4ea', '&Eogondblac;': '\ue0ea', 'è': 'è',
'È': 'È', 'ê': 'ê', 'Ê': 'Ê',
'&eogoncirc;': '\ue49f', '&ering;': '\ue4cf', '&ebreve;': 'ĕ',
'&Ebreve;': 'Ĕ', 'ē': 'ē', 'Ē': 'Ē',
'&eogonmacr;': '\ue4bc', '&Eogonmacr;': '\ue0bc',
'&emacrbreve;': '\ue4b7', '&Emacrbreve;': '\ue0b7',
'&emacracute;': 'ḗ', '&Emacracute;': 'Ḗ', '&eylig;': '\ueec7',
'&eacombcirc;': '\uebbd', '&eucombcirc;': '\uebbe',
'&easup;': '\ue4e1', '&Easup;': '\ue0e1', '&eesup;': '\ue8e2',
'&eisup;': '\ue4e2', '&eosup;': '\ue8e3', '&evsup;': '\ue4e3',
'&schwa;': 'ə', '&Eunc;': '\uf10a', '&Euncclose;': '\uf217',
'&eunc;': '\uf218', '&eext;': '\uf219', '&etall;': '\uf21a',
'&fenl;': '\ueee7', '&fscap;': 'ꜰ', '&fdotbl;': '\ue4ee',
'&Fdotbl;': '\ue0ee', '&fdot;': 'ḟ', '&Fdot;': 'Ḟ',
'&fscapdot;': '\uebd7', '&facute;': '\ue4f0', '&Facute;': '\ue0f0',
'&faumllig;': '\ueec8', 'ff': 'ff', 'fi': 'fi',
'fj': '\ueec9', '&foumllig;': '\uf1bc', 'fl': 'fl',
'&frlig;': '\ueeca', '&ftlig;': '\ueecb', '&fuumllig;': '\ueecc',
'&fylig;': '\ueecd', 'ffi': 'ffi', 'ffl': 'ffl',
'&fftlig;': '\ueece', '&ffylig;': '\ueecf', '&ftylig;': '\ueed0',
'&fturn;': 'ⅎ', '&Fturn;': 'Ⅎ', '&Frev;': 'ꟻ', '&fins;': 'ꝼ',
'&Fins;': 'Ꝼ', '&finsenl;': '\ueeff', '&finsdot;': '\uebd4',
'&Finsdot;': '\uebd3', '&finsdothook;': '\uf21c',
'&finssemiclose;': '\uf21b', '&finssemiclosedot;': '\uebd5',
'&finsclose;': '\uf207', '&finsclosedot;': '\uebd6',
'&finsdotbl;': '\ue7e5', '&Finsdotbl;': '\ue3e5',
'&finsacute;': '\uebb4', '&Finsacute;': '\uebb3', '&fcurl;': '\uf194',
'&genl;': '\ueee8', '&gscap;': 'ɢ', '&gstrok;': 'ǥ', '&Gstrok;': 'Ǥ',
'&gdotbl;': '\ue501', '&Gdotbl;': '\ue101', '&gscapdotbl;': '\uef27',
'ġ': 'ġ', 'Ġ': 'Ġ', '&gscapdot;': '\uef20', '&Gacute;': 'Ǵ',
'ǵ': 'ǵ', '&gglig;': '\ueed1', '&gdlig;': '\ueed2',
'&gdrotlig;': '\ueed3', '&gethlig;': '\ueed4', '&golig;': '\ueede',
'&gplig;': '\uead2', '&grlig;': '\uead0', '&gins;': 'ᵹ',
'&Gins;': 'Ᵹ', '&ginsturn;': 'ꝿ', '&Ginsturn;': 'Ꝿ',
'&Gsqu;': '\uf10e', '&gdivloop;': '\uf21d', '&glglowloop;': '\uf21e',
'&gsmlowloop;': '\uf21f', '&gopen;': 'ɡ', '&gcurl;': '\uf196',
'&henl;': '\ueee9', '&hscap;': 'ʜ', '&hhook;': 'ɦ', 'ħ': 'ħ',
'&hovlmed;': '\ue517', '&hdotbl;': 'ḥ', '&Hdotbl;': 'Ḥ',
'&Hdot;': 'ḣ', '&hdot;': 'Ḣ', '&hscapdot;': '\uebda',
'&hacute;': '\ue516', '&Hacute;': '\ue116', '&hwair;': 'ƕ',
'&HWAIR;': 'Ƕ', '&hslonglig;': '\uebad', '&hslongligbar;': '\ue7c7',
'&hrarmlig;': '\ue8c3', '&Hrarmlig;': '\ue8c2', '&hhalf;': 'ⱶ',
'&Hhalf;': 'Ⱶ', '&Hunc;': '\uf110', '&hrdes;': '\uf23a',
'&ienl;': '\ueeea', '&iscap;': 'ɪ', 'ı': 'ı',
'&inodotenl;': '\ueefd', 'İ': 'İ', '&istrok;': 'ɨ',
'&idblstrok;': '\ue8a1', 'į': 'į', '&inodotogon;': '\ue8dd',
'Į': 'Į', '&icurl;': '\ue52a', '&Icurl;': '\ue12a',
'&idotbl;': 'ị', '&Idotbl;': 'Ị', '&ibrevinvbl;': '\ue548',
'ï': 'ï', 'Ï': 'Ï', 'í': 'í', 'Í': 'Í',
'&idblac;': '\ue543', '&Idblac;': '\ue143', '&idotacute;': '\uebf7',
'&Idotacute;': '\uebf6', 'ì': 'ì', 'Ì': 'Ì',
'î': 'î', 'Î': 'Î', '&ihook;': 'ỉ', '&Ihook;': 'Ỉ',
'&ibreve;': 'ĭ', '&Ibreve;': 'Ĭ', 'ī': 'ī', 'Ī': 'Ī',
'&iovlmed;': '\ue550', '&Iovlhigh;': '\ue150',
'&imacrbreve;': '\ue537', '&Imacrbreve;': '\ue137',
'&imacracute;': '\ue535', '&Imacracute;': '\ue135', 'ij': 'ij',
'IJ': 'IJ', '&iasup;': '\ue8e4', '&iosup;': '\ue8e5',
'&iusup;': '\ue8e6', '&ivsup;': '\ue54b', '&ilong;': '\uf220',
'&Ilong;': 'ꟾ', '&jenl;': '\ueeeb', '&jscap;': 'ᴊ', '&jnodot;': 'ȷ',
'&jnodotenl;': '\ueefe', '&Jdot;': '\ue15c', '&jnodotstrok;': 'ɟ',
'&jbar;': 'ɉ', '&jdblstrok;': '\ue8a2', '&Jbar;': 'Ɉ',
'&jcurl;': '\ue563', '&Jcurl;': '\ue163', '&juml;': '\uebe3',
'&Juml;': '\uebe2', '&jdotbl;': '\ue551', '&Jdotbl;': '\ue151',
'&jacute;': '\ue553', '&Jacute;': '\ue153', '&jdblac;': '\ue562',
'&Jdblac;': '\ue162', '&jmacrmed;': '\ue554', '&jovlmed;': '\ue552',
'&Jmacrhigh;': '\ue154', '&Jovlhigh;': '\ue152', '&jesup;': '\ue8e7',
'&kenl;': '\ueeec', '&kscap;': 'ᴋ', '&khook;': 'ƙ', '&kbar;': 'ꝁ',
'&Kbar;': 'Ꝁ', '&kovlmed;': '\ue7c3', '&kstrleg;': 'ꝃ',
'&Kstrleg;': 'Ꝃ', '&kstrascleg;': 'ꝅ', '&Kstrascleg;': 'Ꝅ',
'&kdot;': '\ue568', '&Kdot;': '\ue168', '&kscapdot;': '\uebdb',
'&kdotbl;': 'ḳ', '&Kdotbl;': 'Ḳ', '&kacute;': 'ḱ', '&Kacute;': 'Ḱ',
'&kslonglig;': '\uebae', '&kslongligbar;': '\ue7c8',
'&krarmlig;': '\ue8c5', '&kunc;': '\uf208', '&ksemiclose;': '\uf221',
'&kclose;': '\uf209', '&kcurl;': '\uf195', '&lenl;': '\ueeed',
'&lscap;': 'ʟ', '&lbar;': 'ƚ', 'ł': 'ł', 'Ł': 'Ł',
'&lhighstrok;': 'ꝉ', '&Lhighstrok;': 'Ꝉ', '&lovlmed;': '\ue5b1',
    '&ltailstrok;': 'ꝲ', '&ldotbl;': 'ḷ', '&Ldotbl;': 'Ḷ',
'&lscapdotbl;': '\uef28', '&ldot;': '\ue59e', '&Ldot;': '\ue19e',
'&lscapdot;': '\uebdc', 'ĺ': 'ĺ', 'Ĺ': 'Ĺ',
'&lringbl;': '\ue5a4', '&lmacrhigh;': '\ue596',
'&lovlhigh;': '\ue58c', '&Lovlhigh;': '\uf7b4', '&lbrk;': 'ꝇ',
'&Lbrk;': 'Ꝇ', '&llwelsh;': 'ỻ', '&LLwelsh;': 'Ỻ',
    '&lllig;': '\uf4f9', '&ldes;': '\uf222', '&lturn;': 'ꞁ',
'&Lturn;': 'Ꞁ', '&menl;': '\ueeee', '&mscap;': 'ᴍ',
'&mtailstrok;': 'ꝳ', '&mdotbl;': 'ṃ', '&Mdotbl;': 'Ṃ',
'&mscapdotbl;': '\uef29', '&mdot;': 'ṁ', '&Mdot;': 'Ṁ',
'&mscapdot;': '\uebdd', '&macute;': 'ḿ', '&Macute;': 'Ḿ',
'&mringbl;': '\ue5c5', '&mmacrmed;': '\ue5b8',
'&Mmacrhigh;': '\ue1b8', '&movlmed;': '\ue5d2',
'&Movlhigh;': '\ue1d2', '&mesup;': '\ue8e8', '&Minv;': 'ꟽ',
'&mturn;': 'ɯ', '&Mturn;': 'Ɯ', '&munc;': '\uf23c',
'&mmedunc;': '\uf225', '&Munc;': '\uf11a', '&mrdes;': '\uf223',
'&muncdes;': '\uf23d', '&mmeduncdes;': '\uf226',
'&Muncdes;': '\uf224', '&muncacute;': '\uf23e',
'&mmeduncacute;': '\uebb6', '&Muncacute;': '\uebb5', '&M5leg;': 'ꟿ',
'&nenl;': '\ueeef', '&nscap;': 'ɴ', '&nscapldes;': '\uf22b',
'&nlrleg;': 'ƞ', '&nlfhook;': 'ɲ', '&nbar;': '\ue7b2',
'&ntailstrok;': 'ꝴ', '&ndot;': 'ṅ', '&Ndot;': 'Ṅ',
'&nscapdot;': '\uef21', 'ń': 'ń', 'Ń': 'Ń',
'&ndotbl;': 'ṇ', '&Ndotbl;': 'Ṇ', '&nscapdotbl;': '\uef2a',
'&ncirc;': '\ue5d7', 'ñ': 'ñ', 'Ñ': 'Ñ',
'&nringbl;': '\ue5ee', '&nmacrmed;': '\ue5dc',
'&Nmacrhigh;': '\ue1dc', 'ŋ': 'ŋ', 'Ŋ': 'Ŋ',
'&nscapslonglig;': '\ueed5', '&nrdes;': '\uf228', '&Nrdes;': '\uf229',
'&nscaprdes;': '\uf22a', '&nflour;': '\uf19a', '&oenl;': '\ueef0',
'&oscap;': 'ᴏ', 'º': 'º', '&oogon;': 'ǫ', '&Oogon;': 'Ǫ',
'&ocurl;': '\ue7d3', '&Ocurl;': '\ue3d3', '&oogoncurl;': '\ue64f',
'&Oogoncurl;': '\ue24f', '&ocurlacute;': '\uebb8',
'&Ocurlacute;': '\uebb7', 'ø': 'ø', 'Ø': 'Ø',
'øcurl;': '\ue7d4', 'Øcurl;': '\ue3d4',
'øogon;': '\ue655', 'Øogon;': '\ue255', '&odotbl;': 'ọ',
'&Odotbl;': 'Ọ', 'ødotbl;': '\uebe1', 'Ødotbl;': '\uebe0',
    '&odot;': 'ȯ', '&Odot;': 'Ȯ', '&oogondot;': '\uebdf',
'&Oogondot;': '\uebde', '&oogonmacr;': 'ǭ', '&Oogonmacr;': 'Ǭ',
'ødot;': '\uebce', 'Ødot;': '\uebcd',
'&oogondotbl;': '\ue608', '&Oogondotbl;': '\ue208', 'ö': 'ö',
'Ö': 'Ö', '&odiaguml;': '\ue8d7', 'öacute;': '\ue62c',
'ó': 'ó', 'Ó': 'Ó', 'øacute;': 'ǿ',
'Øacute;': 'Ǿ', 'ødblac;': '\uebc7',
'Ødblac;': '\uebc6', '&oogonacute;': '\ue60c',
'&Oogonacute;': '\ue20c', 'øogonacute;': '\ue657',
'Øogonacute;': '\ue257', 'ő': 'ő', 'Ő': 'Ő',
'&odotacute;': '\uebf9', '&Odotacute;': '\uebf8',
'&oogondotacute;': '\uebfb', '&Oogondotacute;': '\uebfa',
'ødotacute;': '\uebfd', 'Ødotacute;': '\uebfc',
'&oogondblac;': '\uebc5', '&Oogondblac;': '\uebc4', 'ò': 'ò',
'Ò': 'Ò', 'ô': 'ô', 'Ô': 'Ô',
'öcirc;': '\ue62d', 'Öcirc;': '\ue22d',
'&oogoncirc;': '\ue60e', '&ocar;': 'ǒ', '&Ocar;': 'Ǒ',
'õ': 'õ', 'Õ': 'Õ', '&oring;': '\ue637', '&ohook;': 'ỏ',
'&Ohook;': 'Ỏ', '&obreve;': 'ŏ', '&Obreve;': 'Ŏ',
'øbreve;': '\uebef', 'Øbreve;': '\uebee', 'ō': 'ō',
'Ō': 'Ō', 'ømacr;': '\ue652', 'Ømacr;': '\ue252',
'&omacrbreve;': '\ue61b', '&Omacrbreve;': '\ue21b',
'ømacrbreve;': '\ue653', 'Ømacrbreve;': '\ue253',
'&omacracute;': 'ṓ', '&Omacracute;': 'Ṓ',
'ømacracute;': '\uebed', 'Ømacracute;': '\uebec',
'ömacr;': 'ȫ', 'Ömacr;': 'Ȫ', '&oclig;': '\uefad',
'œ': 'œ', 'Œ': 'Œ', '&oeligscap;': 'ɶ',
'&oeligenl;': '\uefdd', '&oeligogon;': '\ue662',
'&OEligogon;': '\ue262', '&Oloop;': 'Ꝍ', '&oloop;': 'ꝍ',
'&oeligacute;': '\ue659', '&OEligacute;': '\ue259',
'&oeligdblac;': '\uebc9', '&OEligdblac;': '\uebc8',
'&oeligmacr;': '\ue65d', '&OEligmacr;': '\ue25d',
'&oeligmacrbreve;': '\ue660', '&OEligmacrbreve;': '\ue260',
'&oolig;': 'ꝏ', '&OOlig;': 'Ꝏ', '&ooliguml;': '\uebe5',
'&OOliguml;': '\uebe4', '&ooligacute;': '\uefe9',
'&OOligacute;': '\uefe8', '&ooligdblac;': '\uefed',
'&OOligdblac;': '\uefec', '&ooligdotbl;': '\ueffd',
'&OOligdotbl;': '\ueffc', '&orrotlig;': '\ue8de', '&oasup;': '\ue643',
'&oesup;': '\ue644', '&Oesup;': '\ue244', '&oisup;': '\ue645',
'&oosup;': '\ue8e9', '&ousup;': '\ue646', '&Ousup;': '\ue246',
'&ovsup;': '\ue647', '&oopen;': 'ɔ', '&oopenmacr;': '\ue7cc',
'&penl;': '\ueef1', '&pscap;': 'ᴘ', '&pbardes;': 'ꝑ',
'&Pbardes;': 'Ꝑ', '&pflour;': 'ꝓ', '&Pflour;': 'Ꝓ',
'&psquirrel;': 'ꝕ', '&Psquirrel;': 'Ꝕ', '&pdotbl;': '\ue66d',
'&Pdotbl;': '\ue26d', '&pdot;': 'ṗ', '&Pdot;': 'Ṗ',
'&pscapdot;': '\uebcf', '&pacute;': 'ṕ', '&Pacute;': 'Ṕ',
'&pdblac;': '\ue668', '&Pdblac;': '\ue268', '&pmacr;': '\ue665',
'&pplig;': '\ueed6', '&PPlig;': '\ueedd', '&ppflourlig;': '\ueed7',
'&ppliguml;': '\uebe7', '&PPliguml;': '\uebe6', '&Prev;': 'ꟼ',
'&qenl;': '\ueef2', '&qscap;': '\uef0c', '&qslstrok;': 'ꝙ',
'&Qslstrok;': 'Ꝙ', '&qbardes;': 'ꝗ', '&Qbardes;': 'Ꝗ',
'&qbardestilde;': '\ue68b', '&q2app;': '\ue8b3', '&q3app;': '\ue8bf',
'&qcentrslstrok;': '\ue8b4', '&qdotbl;': '\ue688',
'&Qdotbl;': '\ue288', '&qdot;': '\ue682', '&Qdot;': '\ue282',
'&qmacr;': '\ue681', '&qvinslig;': '\uead1', '&Qstem;': '\uf22c',
'&renl;': '\ueef3', '&rscap;': 'ʀ', '&YR;': 'Ʀ', '&rdes;': 'ɼ',
'&rdesstrok;': '\ue7e4', '&rtailstrok;': 'ꝵ', '&rscaptailstrok;': 'ꝶ',
'&Rtailstrok;': '℞', '&Rslstrok;': '℟', '&rdotbl;': 'ṛ',
'&Rdotbl;': 'Ṛ', '&rdot;': 'ṙ', '&Rdot;': 'Ṙ', '&rscapdot;': '\uef22',
'&racute;': 'ŕ', '&Racute;': 'Ŕ', '&rringbl;': '\ue6a3',
'&rscapdotbl;': '\uef2b', '&resup;': '\ue8ea', '&rrot;': 'ꝛ',
'&Rrot;': 'Ꝛ', '&rrotdotbl;': '\ue7c1', '&rrotacute;': '\uebb9',
'&rins;': 'ꞃ', '&Rins;': 'Ꞃ', '&rflour;': '\uf19b',
'&senl;': '\ueef4', '&sscap;': 'ꜱ', '&sdot;': 'ṡ', '&Sdot;': 'Ṡ',
'&sscapdot;': '\uef23', '&sacute;': 'ś', '&Sacute;': 'Ś',
'&sdotbl;': 'ṣ', '&Sdotbl;': 'Ṣ', '&sscapdotbl;': '\uef2c',
'&szlig;': 'ß', '&SZlig;': 'ẞ', '&slongaumllig;': '\ueba0',
'&slongchlig;': '\uf4fa', '&slonghlig;': '\ueba1',
'&slongilig;': '\ueba2', '&slongjlig;': '\uf4fb',
'&slongklig;': '\uf4fc', '&slongllig;': '\ueba3',
'&slonglbarlig;': '\ue8df', '&slongoumllig;': '\ueba4',
'&slongplig;': '\ueba5', '&slongslig;': '\uf4fd',
'&slongslonglig;': '\ueba6', '&slongslongilig;': '\ueba7',
'&slongslongklig;': '\uf4fe', '&slongslongllig;': '\ueba8',
'&slongslongtlig;': '\uf4ff', '&stlig;': 'st', '&slongtlig;': 'ſt',
'&slongtilig;': '\ueba9', '&slongtrlig;': '\uebaa',
'&slonguumllig;': '\uebab', '&slongvinslig;': '\uebac',
'&slongdestlig;': '\ueada', '&slong;': 'ſ', '&slongenl;': '\ueedf',
'&slongbarslash;': 'ẜ', '&slongbar;': 'ẝ', '&slongovlmed;': '\ue79e',
'&slongslstrok;': '\ue8b8', '&slongflour;': '\ue8b7',
'&slongacute;': '\uebaf', '&slongdes;': '\uf127',
'&slongdotbl;': '\ue7c2', '&Sclose;': '\uf126', '&sclose;': '\uf128',
'&sins;': 'ꞅ', '&Sins;': 'Ꞅ', '&tenl;': '\ueef5', '&tscap;': 'ᴛ',
'&ttailstrok;': 'ꝷ', '&togon;': '\ue6ee', '&Togon;': '\ue2ee',
'&tdotbl;': 'ṭ', '&Tdotbl;': 'Ṭ', '&tdot;': 'ṫ', '&Tdot;': 'Ṫ',
'&tscapdot;': '\uef24', '&tscapdotbl;': '\uef2d',
'&tacute;': '\ue6e2', '&Tacute;': '\ue2e2', '&trlig;': '\ueed8',
'&ttlig;': '\ueed9', '&trottrotlig;': '\ueeda', '&tylig;': '\ueedb',
'&tzlig;': '\ueedc', '&trot;': 'ꞇ', '&Trot;': 'Ꞇ',
'&tcurl;': '\uf199', '&uenl;': '\ueef7', '&uscap;': 'ᴜ',
'&ubar;': 'ʉ', '&uogon;': 'ų', '&Uogon;': 'Ų', '&ucurl;': '\ue731',
'&Ucurl;': | |
corrections then in the code below, calls to `pix2foc` will
# have to be replaced with
# wcs_world2pix(all_pix2world(pix_list, origin), origin)
#
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# initial approximation (linear WCS based only)
pix0 = self.wcs_world2pix(world, origin)
# Check that an iterative solution is required at all
# (when any of the non-CD-matrix-based corrections are
# present). If not required return the initial
# approximation (pix0).
if self.sip is None and \
self.cpdis1 is None and self.cpdis2 is None and \
self.det2im1 is None and self.det2im2 is None:
# No non-WCS corrections detected so
# simply return initial approximation:
return pix0
pix = pix0.copy() # 0-order solution
# initial correction:
dpix = self.pix2foc(pix, origin) - pix0
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()['invalid']
old_over = np.geterr()['over']
np.seterr(invalid='ignore', over='ignore')
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while (np.nanmax(dn) >= tol2 and k < maxiter):
# Find correction to the previous solution:
dpix = self.pix2foc(pix, origin) - pix0
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = (dn >= dnprev)
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = (dn >= tol2)
inddiv, = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = (dn < dnprev)
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
ind, = np.where(slowconv & conv)
pix0 = pix0[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
ind, = np.where(np.isfinite(pix).all(axis=1))
pix0 = pix0[ind]
# "Adaptive" fixed-point iterations:
while (ind.shape[0] > 0 and k < maxiter):
# Find correction to the previous solution:
dpixnew = self.pix2foc(pix[ind], origin) - pix0
# Compute norm (L2) of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
# Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = (dnnew < dnprev[ind])
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
subind, = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
subind, = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
pix0 = pix0[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = ((~np.all(np.isfinite(pix), axis=1)) &
(np.all(np.isfinite(world), axis=1)))
# When detect_divergence==False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
inddiv, = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
ind, = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy after {:d} "
"iterations.".format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=None)
else:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy.\n"
"After {0:d} iterations, the solution is diverging "
"at least for one input point."
.format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=inddiv)
return pix
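        # Hedged usage sketch (not part of this class): how the public
        # all_world2pix wrapper below is typically called. The FITS file name
        # and coordinate values are placeholders, not taken from this module.
        #
        #     from astropy.io import fits
        #     from astropy.wcs import WCS
        #     w = WCS(fits.getheader('image.fits'))     # hypothetical file
        #     world = [[5.52, -72.05]]                  # RA/Dec in degrees
        #     pix = w.all_world2pix(world, 0, tolerance=1e-4,
        #                           adaptive=False, detect_divergence=True)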
def all_world2pix(self, *args, tolerance=1e-4, maxiter=20, adaptive=False,
detect_divergence=True, quiet=False, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda *args, **kwargs:
self._all_world2pix(
*args, tolerance=tolerance, maxiter=maxiter,
adaptive=adaptive, detect_divergence=detect_divergence,
quiet=quiet),
'input', *args, **kwargs
)
all_world2pix.__doc__ = """
all_world2pix(*args, tolerance=1.0e-4, maxiter=20,
adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with complete
distortion model.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
tolerance : float, optional (Default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
within this many pixels of the current estimate; more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (Default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (Default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within the maximum number of
iterations set by the ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (Default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
iterates over *all* input points *regardless* until
the required accuracy has been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
but only a few input data points remain for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
shown that enabling this option would lead to about a
50-100% penalty in computational time (depending on
specifics of the image, geometric distortions, and
number of input points to be converted). Therefore,
for HST and possibly other instruments, it is recommended
to set ``adaptive`` = `False`. The only danger in
getting this setting wrong will be a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (Default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
| |
#!/usr/bin/env python2
# -*- coding:utf8 -*-
"""
A simple Python 2.7+ / 3.4+ script to send a text message to a Free Mobile phone.
- Warning: it only works in France to a French number, using the mobile operator Free Mobile.
- Warning: some initial configuration is required before running this script (see the error messages).
- Activate it on: https://mobile.free.fr/account/mes-options/notifications-sms
- Copyright 2014-20 <NAME>
- License MIT.
Examples
--------
$ FreeSMS.py --help
Gives help
$ FreeSMS.py "I like using Python to send SMS to myself from my laptop -- and it's free thanks to Free Mobile !"
Will send a test message to your mobile phone.
- Last version? Take a look to the latest version at https://github.com/Naereen/FreeSMS.py
- Initial Copyright : José - Juin 2014 (http://eyesathome.free.fr/index.php/tag/freemobile/)
- License:
MIT License
Copyright (c) 2020 <NAME> (Naereen), https://github.com/Naereen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import print_function
# Use sys.version to be compatible with Python 2
import sys
# Use os.getenv to try to emulate os.path.expanduser if needed
import os
# Use time to sleep and get string for today current hour
import time
# Use JSON to pretty print a dictionary
import json
# Use base64 to not keep plaintext files of the number, username and password in your home
import base64
today = time.strftime("%H:%M:%S %Y-%m-%d")
try:
from os.path import expanduser
except ImportError:
print("Warning, os.path.expanduser is not available, trying to use os.getenv('USER') = {} ...".format(os.getenv("USER")))
def expanduser(s):
""" Try to simulate the os.path.expanduser function. """
return '/home/' + os.getenv("USER") + '/' + s
if sys.version_info < (3, 0):
from urllib import urlencode
from urllib2 import urlopen, HTTPError
else:
    from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.error import HTTPError
try:
try:
from ansicolortags import printc
except ImportError:
print("Optional dependancy (ansicolortags) is not available, using regular print function.")
print(" You can install it with : 'pip install ansicolortags' (or sudo pip)...")
from ANSIColors import printc
except ImportError:
print("Optional dependancy (ANSIColors) is not available, using regular print function.")
print(" You can install it with : 'pip install ANSIColors-balises' (or sudo pip)...")
def printc(*a, **kw):
""" Fake function printc.
ansicolortags or ANSIColors are not installed...
Install ansicolortags from pypi (with 'pip install ansicolortags')
"""
print(*a, **kw)
def testSpecialFile(name, number=''):
""" Test if the hidden file '~/.smsapifreemobile_name.b64' exists and decodes (base64) correctly.
"""
assert name in ["number", "user", "password"], "Error: unknown or incorrect value for 'name' for the function openSpecialFile(name) ..."
# printc("<cyan>Testing the hidden file <white>'<u>~/.smsapifreemobile_{}.b64<U>'<cyan>...<white>".format(name)) # DEBUG
try:
with open(expanduser('~/') + ".smsapifreemobile_" + name + number + ".b64") as f:
variable = base64.b64decode(f.readline()[:-1])
while variable[-1] == '\n':
variable = variable[:-1]
return True
except OSError:
return False
def openSpecialFile(name, number=''):
""" Open the hidden file '~/.smsapifreemobile_name.b64', read and decode (base64) and return its content.
"""
assert name in ["number", "user", "password"], "Error: unknown or incorrect value for 'name' for the function openSpecialFile(name) ..."
printc("<cyan>Opening the hidden file <white>'<u>~/.smsapifreemobile_{}.b64<U>'<cyan>, read and decode (base64) and return its content...<white>".format(name))
try:
with open(expanduser('~/') + ".smsapifreemobile_" + name + number + ".b64") as f:
variable = base64.b64decode(f.readline()[:-1])
while variable[-1] == '\n':
variable = variable[:-1]
return variable
except OSError:
printc("<red>Error: unable to read the file '~/.smsapifreemobile_{}.b64' ...<white>".format(name))
printc("<yellow>Please check that it is present, and if it not there, create it:<white>")
if name == "number":
print("To create '~/.smsapifreemobile_number.b64', use your phone number (like '0612345678', not with +33), and execute this command line (in a terminal):")
printc("<black>echo '0612345678' | base64 > '~/.smsapifreemobile_number.b64'<white>".format())
elif name == "user":
print("To create '~/.smsapifreemobile_user.b64', use your Free Mobile identifier (a 8 digit number, like '83123456'), and execute this command line (in a terminal):")
printc("<black>echo '83123456' | base64 > '~/.smsapifreemobile_user.b64'<white>".format())
elif name == "password":
print("To create '~/.smsapifreemobile_password.b64', go to this webpage, https://mobile.free.fr/account/mes-options/notifications-sms (after logging to your Free Mobile account), and copy the API key (a 14-caracters string on [a-zA-Z0-9]*, like '<KEY>'), and execute this command line (in a terminal):")
printc("<black>echo '<KEY>' | base64 > '~/.smsapifreemobile_password.b64<white>' ".format())
numbers = []
#: Number (not necessary)
# number = base64.b64decode(open(expanduser('~') + ".smsapifreemobile_number.b64").readline()[:-1])
# if number[-1] == '\n':
# number = number[:-1]
number = openSpecialFile("number")
numbers.append(number)
if testSpecialFile("number", "2"):
number2 = openSpecialFile("number", "2")
numbers.append(number2)
# Detect language
language = os.getenv("LANG")
language = language[0:2] if language else "fr"
# Maximum size that can be sent
# XXX Reference: https://en.wikipedia.org/wiki/Short_Message_Service#Message_size
# "6 to 8 segment messages are the practical maximum"
MAX_SIZE = 4 * 159
STR_MAX_SIZE = "4*159"
if language == "fr":
errorcodes = {
400: "Un des paramètres obligatoires est manquant.",
402: "Trop de SMS ont été envoyés en trop peu de temps.",
403: """Le service n'est pas activé sur l'espace abonné, ou login / clé incorrect.
    Allez sur '<black>https://mobile.free.fr/account/mes-options/notifications-sms<white>' svp, et activez l'option correspondante.""",
500: "Erreur côté serveur. Veuillez réessayez ultérieurement.",
1: "Le SMS a été envoyé sur votre mobile ({}).".format(number) if len(numbers) <= 1 else "Le SMS a été envoyé sur vos numéros ({}).".format(numbers),
"toolong": "<red>Attention<white> : le message est trop long (+ de <black>{}<white> caracters, soit plus de 3 SMS).".format(STR_MAX_SIZE)
}
else:
errorcodes = {
400: "One of the necessary parameter is missing.",
402: "Too many SMSs has been sent in a short time (you might be a spammer!).",
403: """Access denied: the service might not be activated on the online personal space, or login/password is wrong.
Please go on '<black>https://mobile.free.fr/account/mes-options/notifications-sms<white>' please, and enable the corresponding option.""",
500: "Error from the server side. Please try again later.",
1: "The SMS has been sent to your mobile ({}).".format(number) if len(numbers) <= 1 else "The SMS has been sent to all your mobile numbers ({}).".format(numbers),
"toolong": "<red>Warning<white>: message is too long (more than <black>{}<white> caracters, so more than 3 SMS).".format(STR_MAX_SIZE)
}
def send_sms(text="Empty!", secured=True, sleep_duration=0):
""" Sens a free SMS to the user identified by [user], with [password].
:user: Free Mobile id (of the form [0-9]{8}),
:password: Service password (of the form [a-zA-Z0-9]{14}),
:text: The content of the message (a warning is displayed if the message is longer than MAX_SIZE characters)
:secured: True to use HTTPS, False to use HTTP.
Returns a boolean and a status string.
"""
# DONE split the text into smaller pieces if length is too big (automatically, or propose to do it ?)
if len(text) > MAX_SIZE:
printc(errorcodes["toolong"])
nb_sub_messages = len(text) // MAX_SIZE
printc("\n<red>Warning<white>: message will be split in <red>{} piece{}<white> of size smaller than <black>{} characters<white>...".format(nb_sub_messages + 1, 's' if nb_sub_messages > 0 else '', MAX_SIZE))
printc(" <magenta>Note that new lines and other information can be lost!<white>")
for i, index in enumerate(range(0, len(text), MAX_SIZE)):
answer = send_sms(text[index: index + MAX_SIZE])
printc("For piece #{} of the message, the answer is:\n <magenta>{}<white>...\n".format(i + 1, answer[1]))
return answer
# raise ValueError(errorcodes["toolong"])
# Read user and password
users = []
#: Identification Number free mobile
user = openSpecialFile("user")
users.append(user)
if testSpecialFile("user", "2"):
user2 = openSpecialFile("user", "2")
users.append(user2)
passwords = []
#: Password
password = openSpecialFile("password")
passwords.append(password)
if testSpecialFile("password", "2"):
password2 = openSpecialFile("password", "2")
passwords.append(password2)
printc("\n<green>Your message is:<white>\n<yellow>" + text + "<white>")
url = "https" if secured else "http"
# Sending to all the numbers
results = []
for (user, password) in zip(users, passwords):
dictQuery = {"user": user, "pass": password, "msg": text}
string_query = json.dumps(dictQuery, sort_keys=True, indent=4)
string_query = string_query.replace(password, '*' * len(password))
printc("\nThe web-based query to the | |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
This module contains utility functions to generate phase equilibrium data and
plots.
"""
__author__ = "<NAME>"
# Import objects from pyomo package
from pyomo.environ import (
check_optimal_termination,
ConcreteModel,
SolverFactory,
value,
Var,
Constraint,
Expression,
units as pyunits,
)
import idaes.logger as idaeslog
from idaes.core.solvers import get_solver
# Import plotting functions
import matplotlib.pyplot as plt
import numpy as np
def Txy_diagram(
model,
component_1,
component_2,
pressure,
num_points=20,
temperature=298.15,
figure_name=None,
print_legend=True,
include_pressure=False,
print_level=idaeslog.NOTSET,
solver=None,
solver_op=None,
):
"""
This method generates T-x-y plots. Given the components, pressure and property dictionary
this function calls Txy_data() to generate T-x-y data and once the data has
been generated calls build_txy_diagrams() to create a plot.
Args:
component_1: Component which composition will be plotted in x axis
component_2: Component which composition will decrease in x axis
pressure: Pressure at which the bubble and dew temperatures will be calculated
temperature: Temperature at which to initialize state block
num_points: Number of data point to be calculated
properties: property package which contains parameters to calculate bubble
and dew temperatures for the mixture of the components specified.
figure_name: if a figure name is included the plot will save with the name
figure_name.png
print_legend (bool): = If True, include legend to distinguish between
Bubble and dew temperature. The default is True.
include_pressure (bool) = If True, print pressure at which the plot is
calculated in legends. The default is False.
print_level: printing level from initialization
solver: solver to use (default=None, use IDAES default solver)
solver_op: solver options
Returns:
Plot
"""
# Run the Txy_data function to obtain bubble and dew temperatures
Txy_data_to_plot = Txy_data(
model,
component_1,
component_2,
pressure,
num_points,
temperature,
print_level,
solver,
solver_op,
)
# Run diagrams function to convert t-x-y data into a plot
build_txy_diagrams(Txy_data_to_plot, figure_name, print_legend, include_pressure)
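# Hedged usage sketch (not from this module): how Txy_diagram might be called
# for a benzene/toluene mixture. The property-package import path and its
# configuration dictionary are assumptions that depend on the installed IDAES
# version, so they are shown as comments only.
#
#   from pyomo.environ import ConcreteModel
#   from idaes.models.properties.activity_coeff_models.BTX_activity_coeff_VLE import (
#       BTXParameterBlock)
#   m = ConcreteModel()
#   m.params = BTXParameterBlock(default={"valid_phase": ("Liq", "Vap"),
#                                         "activity_coeff_model": "Ideal"})
#   Txy_diagram(m, "benzene", "toluene", pressure=101325, num_points=25)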
def Txy_data(
model,
component_1,
component_2,
pressure,
num_points=20,
temperature=298.15,
print_level=idaeslog.NOTSET,
solver=None,
solver_op=None,
):
"""
Function to generate T-x-y data. The function builds a state block and extracts
bubble and dew temperatures at P pressure for N number of compositions.
As N is increased, the time of the calculation will increase and
create a smoother looking plot.
Args:
component_1: Component 1
component_2: Component 2
pressure: Pressure at which the bubble and dew temperatures will be calculated
temperature: Temperature at which to initialize state block
num_points: Number of data point to be calculated
model: Model with initialized property package which contains data to calculate
bubble and dew temperatures for component 1 and component 2
print_level: printing level from initialization
solver: solver to use (default=None, use IDAES default solver)
solver_op: solver options
Returns:
(Class): A class containing the T-x-y data
"""
components = list(model.params.component_list)
components_used = [component_1, component_2]
components_not_used = list(set(components) - set(components_used))
# Add properties parameter blocks to the flowsheet with specifications
model.props = model.params.build_state_block([1], default={"defined_state": True})
# Set initial concentration of component 1 close to 1
x = 0.99
# Set conditions for flash unit model
model.props[1].mole_frac_comp[component_1].fix(x)
for i in components_not_used:
model.props[1].mole_frac_comp[i].fix(1e-5)
xs = sum(value(model.props[1].mole_frac_comp[i]) for i in components_not_used)
model.props[1].mole_frac_comp[component_2].fix(1 - x - xs)
model.props[1].flow_mol.fix(1)
model.props[1].temperature.fix(temperature)
model.props[1].pressure.fix(pressure)
# Initialize flash unit model
model.props[1].calculate_scaling_factors()
model.props.initialize(solver=solver, optarg=solver_op, outlvl=print_level)
solver = get_solver(solver, solver_op)
# Create an array of compositions with N number of points
x_d = np.linspace(x, 1 - x - xs, num_points)
# Create empty arrays for concentration, bubble temperature and dew temperature
X = []
Tbubb = []
Tdew = []
# Obtain pressure and temperature units from the unit model
Punit = pyunits.get_units(model.props[1].pressure)
Tunit = pyunits.get_units(model.props[1].temperature)
count = 1
# Create and run loop to calculate temperatures at every composition
for i in range(len(x_d)):
model.props[1].mole_frac_comp[component_1].fix(x_d[i])
model.props[1].mole_frac_comp[component_2].fix(1 - x_d[i] - xs)
# solve the model
status = solver.solve(model, tee=False)
# If solution is optimal store the concentration, and calculated temperatures in the created arrays
if check_optimal_termination(status):
print(
"Case: ", count, " Optimal. ", component_1, "x = {:.2f}".format(x_d[i])
)
if hasattr(model.props[1], "_mole_frac_tdew") and hasattr(
model.props[1], "_mole_frac_tbub"
):
Tbubb.append(value(model.props[1].temperature_bubble["Vap", "Liq"]))
Tdew.append(value(model.props[1].temperature_dew["Vap", "Liq"]))
elif hasattr(model.props[1], "_mole_frac_tdew"):
print("One of the components only exists in vapor phase.")
Tdew.append(value(model.props[1].temperature_dew["Vap", "Liq"]))
elif hasattr(model.props[1], "_mole_frac_tbub"):
print("One of the components only exists in liquid phase.")
Tbubb.append(value(model.props[1].temperature_bubble["Vap", "Liq"]))
X.append(x_d[i])
# If the solver did not solve to an optimal solution, do not store the data point
else:
print(
"Case: ", count, " No Result", component_1, "x = {:.2f}".format(x_d[i])
)
count += 1
# Call TXYData function and store the data in TD class
TD = TXYDataClass(component_1, component_2, Punit, Tunit, pressure)
TD.TBubb = Tbubb
TD.TDew = Tdew
TD.x = X
# Return the data class with all the information of the calculations
return TD
# Author: <NAME>
class TXYDataClass:
"""
Write data needed for build_txy_diagrams() into a class. The class can be
obtained by running Txy_data() or by assigning values to the class.
"""
def __init__(self, component_1, component_2, Punits, Tunits, pressure):
"""
Args:
component_1: Component 1
component_2: Component 2
Punits: Pressure units
Tunits: Temperature units
pressure: Pressure at which the T-x-y data was evaluated
Returns:
(Class): A class containing the T-x-y data
"""
# Build
self.Component_1 = component_1
self.Component_2 = component_2
# Assign units of pressure and temperature
self.Punits = Punits
self.Tunits = Tunits
# Assign pressure at which the data has been calculated
self.P = pressure
# Create arrays for data
self.TBubb = []
self.TDew = []
self.x = []
def Temp_Bubb(self, data_list):
"""
Args:
data_list: Bubble temperature array
Returns:
None
"""
self.TBubb = data_list
def Temp_Dew(self, data_list_2):
"""
Args:
data_list_2: Dew temperature array
Returns:
None
"""
self.TDew = data_list_2
def composition(self, data_list_3):
"""
Args:
data_list_3: x data array
Returns:
None
"""
self.x = data_list_3
# Author: <NAME>
def build_txy_diagrams(
txy_data, figure_name=None, print_legend=True, include_pressure=False
):
"""
Args:
txy_data: Txy data class includes components bubble and dew
temperatures, compositions, components, pressure, and units.
figure_name: if a figure name is included the plot will save with the name
figure_name.png
print_legend (bool): = If True, include legend to distinguish between
Bubble and dew temperature. The default is True.
include_pressure (bool) = If True, print pressure at which the plot is
calculated in legends. The default is False.
Returns:
t-x-y plot
"""
# Declare a plot and it's size
fig, ax = plt.subplots(figsize=(12, 8))
if len(txy_data.TBubb) > 0 and len(txy_data.TDew) > 0:
if include_pressure == True:
# Plot results for bubble temperature
ax.plot(
txy_data.x,
txy_data.TBubb,
"r",
label="Bubble Temp P = "
+ str(txy_data.P)
+ " "
+ str(txy_data.Punits),
linewidth=1.5,
)
# Plot results for dew temperature
ax.plot(
txy_data.x,
txy_data.TDew,
"b",
label="Dew Temp P = "
+ str(txy_data.P)
+ " "
+ str(txy_data.Punits),
linewidth=1.5,
)
else:
# Plot results for bubble temperature
ax.plot(
txy_data.x, txy_data.TBubb, "r", label="Bubble Temp ", linewidth=1.5
)
# Plot results for dew temperature
ax.plot(txy_data.x, txy_data.TDew, "b", label="Dew Temp", linewidth=1.5)
elif len(txy_data.TDew) == 0:
if include_pressure == True:
# Plot results for bubble temperature (no dew-point data available)
ax.plot(
txy_data.x,
txy_data.TBubb,
"b",
label="Dew Temp P = "
+ str(txy_data.Press)
+ " "
+ str(txy_data.Punits),
linewidth=1.5,
)
else:
# Plot results for bubble temperature
ax.plot(txy_data.x, txy_data.TBubb, "b", label="Bubble Temp", linewidth=1.5)
elif len(txy_data.TBubb) == 0:
if include_pressure == True:
# Plot results for dew temperature (no bubble-point data available)
ax.plot(
txy_data.x,
txy_data.TDew,
"b",
label="Bubble Temp P = "
+ str(txy_data.Press)
+ " "
+ str(txy_data.Punits),
linewidth=1.5,
)
else:
# Plot results for dew temperature
ax.plot(txy_data.x, txy_data.TDew, "b", label="Dew Temp", linewidth=1.5)
# Include grid
ax.grid()
# Declare labels and fontsize
plt.xlabel(txy_data.Component_1 + " concentration (mol/mol)", fontsize=20)
plt.ylabel("Temperature [" + str(txy_data.Tunits) + "]", | |
from typing import Union, List, Tuple, Sequence, Dict, Any, Optional, Collection
from copy import copy
from pathlib import Path
import pickle as pkl
import logging
import random
import lmdb
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from scipy.spatial.distance import pdist, squareform
from .tokenizers import TAPETokenizer
from .registry import registry
logger = logging.getLogger(__name__)
def dataset_factory(data_file: Union[str, Path], *args, **kwargs) -> Dataset:
data_file = Path(data_file)
if not data_file.exists():
raise FileNotFoundError(data_file)
if data_file.suffix == '.lmdb':
return LMDBDataset(data_file, *args, **kwargs)
elif data_file.suffix in {'.fasta', '.fna', '.ffn', '.faa', '.frn'}:
return FastaDataset(data_file, *args, **kwargs)
elif data_file.suffix == '.json':
return JSONDataset(data_file, *args, **kwargs)
elif data_file.is_dir():
return NPZDataset(data_file, *args, **kwargs)
else:
raise ValueError(f"Unrecognized datafile type {data_file.suffix}")
def pad_sequences(sequences: Sequence, constant_value=0, dtype=None) -> np.ndarray:
batch_size = len(sequences)
shape = [batch_size] + np.max([seq.shape for seq in sequences], 0).tolist()
if dtype is None:
dtype = sequences[0].dtype
if isinstance(sequences[0], np.ndarray):
array = np.full(shape, constant_value, dtype=dtype)
elif isinstance(sequences[0], torch.Tensor):
array = torch.full(shape, constant_value, dtype=dtype)
for arr, seq in zip(array, sequences):
arrslice = tuple(slice(dim) for dim in seq.shape)
arr[arrslice] = seq
return array
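# Hedged illustration (not part of the original module): pads two ragged token
# arrays into one rectangular batch. The helper name below is hypothetical.
def _pad_sequences_example():
    seqs = [np.array([1, 2, 3]), np.array([4, 5])]
    batch = pad_sequences(seqs, constant_value=0)
    # batch == array([[1, 2, 3],
    #                 [4, 5, 0]])
    return batch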
class FastaDataset(Dataset):
"""Creates a dataset from a fasta file.
Args:
data_file (Union[str, Path]): Path to fasta file.
in_memory (bool, optional): Whether to load the full dataset into memory.
Default: False.
"""
def __init__(self,
data_file: Union[str, Path],
in_memory: bool = False):
from Bio import SeqIO
data_file = Path(data_file)
if not data_file.exists():
raise FileNotFoundError(data_file)
# if in_memory:
cache = list(SeqIO.parse(str(data_file), 'fasta'))
num_examples = len(cache)
self._cache = cache
# else:
# records = SeqIO.index(str(data_file), 'fasta')
# num_examples = len(records)
#
# if num_examples < 10000:
# logger.info("Reading full fasta file into memory because number of examples "
# "is very low. This loads data approximately 20x faster.")
# in_memory = True
# cache = list(records.values())
# self._cache = cache
# else:
# self._records = records
# self._keys = list(records.keys())
self._in_memory = in_memory
self._num_examples = num_examples
def __len__(self) -> int:
return self._num_examples
def __getitem__(self, index: int):
if not 0 <= index < self._num_examples:
raise IndexError(index)
# if self._in_memory and self._cache[index] is not None:
record = self._cache[index]
# else:
# key = self._keys[index]
# record = self._records[key]
# if self._in_memory:
# self._cache[index] = record
item = {'id': record.id,
'primary': str(record.seq),
'protein_length': len(record.seq)}
return item
class LMDBDataset(Dataset):
"""Creates a dataset from an lmdb file.
Args:
data_file (Union[str, Path]): Path to lmdb file.
in_memory (bool, optional): Whether to load the full dataset into memory.
Default: False.
"""
def __init__(self,
data_file: Union[str, Path],
in_memory: bool = False):
data_file = Path(data_file)
if not data_file.exists():
raise FileNotFoundError(data_file)
env = lmdb.open(str(data_file), max_readers=1, readonly=True,
lock=False, readahead=False, meminit=False)
with env.begin(write=False) as txn:
num_examples = pkl.loads(txn.get(b'num_examples'))
if in_memory:
cache = [None] * num_examples
self._cache = cache
self._env = env
self._in_memory = in_memory
self._num_examples = num_examples
def __len__(self) -> int:
return self._num_examples
def __getitem__(self, index: int):
if not 0 <= index < self._num_examples:
raise IndexError(index)
if self._in_memory and self._cache[index] is not None:
item = self._cache[index]
else:
with self._env.begin(write=False) as txn:
item = pkl.loads(txn.get(str(index).encode()))
if 'id' not in item:
item['id'] = str(index)
if self._in_memory:
self._cache[index] = item
return item
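# Hedged sketch (not part of the original module): writing an .lmdb file that
# LMDBDataset above can read -- one pickled record per stringified index plus a
# pickled b'num_examples' entry. The file name and record contents are
# placeholders, so this is kept as a comment rather than executable code.
#
#   records = [{'primary': 'MKTAYIAK', 'protein_length': 8}]
#   env = lmdb.open('example.lmdb', map_size=int(1e9))
#   with env.begin(write=True) as txn:
#       for i, rec in enumerate(records):
#           txn.put(str(i).encode(), pkl.dumps(rec))
#       txn.put(b'num_examples', pkl.dumps(len(records)))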
class JSONDataset(Dataset):
"""Creates a dataset from a json file. Assumes that data is
a JSON serialized list of record, where each record is
a dictionary.
Args:
data_file (Union[str, Path]): Path to json file.
in_memory (bool): Dummy variable to match API of other datasets
"""
def __init__(self, data_file: Union[str, Path], in_memory: bool = True):
import json
data_file = Path(data_file)
if not data_file.exists():
raise FileNotFoundError(data_file)
records = json.loads(data_file.read_text())
if not isinstance(records, list):
raise TypeError(f"TAPE JSONDataset requires a json serialized list, "
f"received {type(records)}")
self._records = records
self._num_examples = len(records)
def __len__(self) -> int:
return self._num_examples
def __getitem__(self, index: int):
if not 0 <= index < self._num_examples:
raise IndexError(index)
item = self._records[index]
if not isinstance(item, dict):
raise TypeError(f"Expected dataset to contain a list of dictionary "
f"records, received record of type {type(item)}")
if 'id' not in item:
item['id'] = str(index)
return item
class NPZDataset(Dataset):
"""Creates a dataset from a directory of npz files.
Args:
data_file (Union[str, Path]): Path to directory of npz files
in_memory (bool): Dummy variable to match API of other datasets
"""
def __init__(self,
data_file: Union[str, Path],
in_memory: bool = True,
split_files: Optional[Collection[str]] = None):
data_file = Path(data_file)
if not data_file.exists():
raise FileNotFoundError(data_file)
if not data_file.is_dir():
raise NotADirectoryError(data_file)
file_glob = data_file.glob('*.npz')
if split_files is None:
file_list = list(file_glob)
else:
split_files = set(split_files)
if len(split_files) == 0:
raise ValueError("Passed an empty split file set")
file_list = [f for f in file_glob if f.name in split_files]
if len(file_list) != len(split_files):
num_missing = len(split_files) - len(file_list)
raise FileNotFoundError(
f"{num_missing} specified split files not found in directory")
if len(file_list) == 0:
raise FileNotFoundError(f"No .npz files found in {data_file}")
self._file_list = file_list
def __len__(self) -> int:
return len(self._file_list)
def __getitem__(self, index: int):
if not 0 <= index < len(self):
raise IndexError(index)
item = dict(np.load(self._file_list[index]))
if not isinstance(item, dict):
raise TypeError(f"Expected dataset to contain a list of dictionary "
f"records, received record of type {type(item)}")
if 'id' not in item:
item['id'] = self._file_list[index].stem
return item
@registry.register_task('embed')
class EmbedDataset(Dataset):
def __init__(self,
data_file: Union[str, Path],
tokenizer: Union[str, TAPETokenizer] = 'iupac',
in_memory: bool = False,
convert_tokens_to_ids: bool = True):
super().__init__()
if isinstance(tokenizer, str):
tokenizer = TAPETokenizer(vocab=tokenizer)
self.tokenizer = tokenizer
self.data = dataset_factory(data_file)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, index: int):
item = self.data[index]
token_ids = self.tokenizer.encode(item['primary'])
input_mask = np.ones_like(token_ids)
return item['id'], token_ids, input_mask
def collate_fn(self, batch: List[Tuple[Any, ...]]) -> Dict[str, torch.Tensor]:
ids, tokens, input_mask = zip(*batch)
ids = list(ids)
tokens = torch.from_numpy(pad_sequences(tokens))
input_mask = torch.from_numpy(pad_sequences(input_mask))
return {'ids': ids, 'input_ids': tokens, 'input_mask': input_mask} # type: ignore
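# Hedged usage sketch (not part of the original module): batching EmbedDataset
# with a torch DataLoader via its collate_fn. The fasta path is a placeholder.
#
#   from torch.utils.data import DataLoader
#   ds = EmbedDataset('proteins.fasta', tokenizer='iupac')
#   loader = DataLoader(ds, batch_size=4, collate_fn=ds.collate_fn)
#   for batch in loader:
#       ids, tokens, mask = batch['ids'], batch['input_ids'], batch['input_mask']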
@registry.register_task('masked_language_modeling')
class MaskedLanguageModelingDataset(Dataset):
"""Creates the Masked Language Modeling Pfam Dataset
Args:
data_path (Union[str, Path]): Path to tape data root.
split (str): One of ['train', 'valid', 'holdout'], specifies which data file to load.
in_memory (bool, optional): Whether to load the full dataset into memory.
Default: False.
"""
def __init__(self,
data_path: Union[str, Path],
split: str,
tokenizer: Union[str, TAPETokenizer] = 'iupac',
in_memory: bool = False):
super().__init__()
if split not in ('train', 'valid', 'holdout'):
raise ValueError(
f"Unrecognized split: {split}. "
f"Must be one of ['train', 'valid', 'holdout']")
if isinstance(tokenizer, str):
tokenizer = TAPETokenizer(vocab=tokenizer)
self.tokenizer = tokenizer
data_path = Path(data_path)
data_file = f'pfam/pfam_{split}.lmdb'
self.data = dataset_factory(data_path / data_file, in_memory)
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, index):
item = self.data[index]
tokens = self.tokenizer.tokenize(item['primary'])
tokens = self.tokenizer.add_special_tokens(tokens)
masked_tokens, labels = self._apply_bert_mask(tokens)
masked_token_ids = np.array(
self.tokenizer.convert_tokens_to_ids(masked_tokens), np.int64)
input_mask = np.ones_like(masked_token_ids)
return masked_token_ids, input_mask, labels, item['clan'], item['family']
def collate_fn(self, batch: List[Any]) -> Dict[str, torch.Tensor]:
input_ids, input_mask, lm_label_ids, clan, family = tuple(zip(*batch))
input_ids = torch.from_numpy(pad_sequences(input_ids, 0))
input_mask = torch.from_numpy(pad_sequences(input_mask, 0))
# ignore_index is -1
lm_label_ids = torch.from_numpy(pad_sequences(lm_label_ids, -1))
clan = torch.LongTensor(clan) # type: ignore
family = torch.LongTensor(family) # type: ignore
return {'input_ids': input_ids,
'input_mask': input_mask,
'targets': lm_label_ids}
def _apply_bert_mask(self, tokens: List[str]) -> Tuple[List[str], List[int]]:
masked_tokens = copy(tokens)
labels = np.zeros([len(tokens)], np.int64) - 1
for i, token in enumerate(tokens):
# Tokens begin and end with start_token and stop_token, ignore these
if token in (self.tokenizer.start_token, self.tokenizer.stop_token):
continue
prob = random.random()
if prob < 0.15:
prob /= 0.15
labels[i] = self.tokenizer.convert_token_to_id(token)
if prob < 0.8:
# 80% random change to mask token
token = self.tokenizer.mask_token
elif prob < 0.9:
# 10% chance to change to random token
token = self.tokenizer.convert_id_to_token(
random.randint(0, self.tokenizer.vocab_size - 1))
else:
# 10% chance to keep current token
pass
masked_tokens[i] = token
return masked_tokens, labels
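    # Illustrative note (hedged, not from the original module): for a token
    # selected with probability 0.15, _apply_bert_mask records its true id in
    # 'labels' and then, 80% of the time, replaces it with the mask token, 10%
    # of the time with a random vocabulary token, and 10% of the time keeps it
    # unchanged; all unselected positions keep the label -1 so the loss
    # function ignores them.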
@registry.register_task('language_modeling')
class LanguageModelingDataset(Dataset):
"""Creates the Language Modeling Pfam Dataset
Args:
data_path (Union[str, Path]): Path to tape data root.
split (str): One of ['train', 'valid', 'holdout'], specifies which data file to load.
in_memory (bool, optional): Whether to load the full dataset into memory.
Default: False.
"""
def __init__(self,
data_path: Union[str, Path],
split: str,
tokenizer: Union[str, TAPETokenizer] = 'iupac',
in_memory: bool = False):
super().__init__()
if split not in ('train', 'valid', 'holdout'):
raise ValueError(
f"Unrecognized split: {split}. "
f"Must be one of ['train', 'valid', 'holdout']")
if isinstance(tokenizer, str):
tokenizer = TAPETokenizer(vocab=tokenizer)
self.tokenizer = tokenizer
data_path = Path(data_path)
data_file = f'pfam/pfam_{split}.lmdb'
self.data = dataset_factory(data_path / data_file, in_memory)
def __len__(self) -> int:
| |
10: return 10
else: return u
def __get_current_coords(self,):
return np.array([self.x_history[-1], self.y_history[-1], self.tetta_history[-1]])
def __get_terminal_coords(self,):
return np.array([self.xf, self.yf, self.tettaf])
def estimate(self,):
v0 = self.__get_current_coords()
vf = self.__get_terminal_coords()
return np.linalg.norm(vf - v0)
def reset(self,):
self.x_history = [self.x_history[0]]
self.y_history = [self.y_history[0]]
self.tetta_history = [self.tetta_history[0]]
self.time_history = [0.0]
self.control_history = [(0.0, 0.0)]
def get_coords(self,):
return (self.x_history, self.y_history)
def get_control_in_time(self,):
return (self.time_history, self.control_history)
def plot_trajectory(self,):
x, y = self.get_coords()
fig = plt.figure()
plt.plot(x, y, 'r')
plt.xlabel('${x}$',fontsize=20)
plt.ylabel('${y}$',fontsize=20)
plt.legend(['${y}({x})$'],loc='upper right')
plt.show()
class NetworkOperator(object):
"""
"""
def __init__(self, unaries, binaries, input_nodes, output_nodes):
"""
Instantiates Network Operator object
xq - list of variables and parameters
unaries - list of unary functions
binaries - list of binary functions
"""
self.psi = None
self.base_psi = None
self.un_dict = {ind: func for ind, func in enumerate(unaries)}
self.bin_dict = {ind: func for ind, func in enumerate(binaries)}
self.q = []
self.base_q = []
self.__input_node_free_index = None
self.output_nodes = output_nodes # list of indexes that output nodes possess
self.input_nodes = input_nodes # list of indexes that input nodes possess
def get_input_nodes(self,):
return self.input_nodes
def set_q(self, q):
"""
q - list
return dict
"""
self.q = {ind: val for ind, val in enumerate(q)}
self.__input_node_free_index = len(q)
def get_q(self,):
return self.q
def set_base_q(self, q):
self.base_q = {ind: val for ind, val in enumerate(q)}
self.__input_node_free_index = len(q)
def get_base_q(self,):
return self.base_q
def update_base_q(self,):
new_q = copy(self.get_q()).values()
self.set_base_q(new_q)
def roll_back_to_base_q(self,):
old_q = copy(self.get_base_q()).values()
self.set_q(old_q)
def get_free_input_node(self,):
return self.__input_node_free_index
def variate_parameters(self, index, value):
q = self.get_q()
q[index] = value
def get_psi(self,):
return self.psi
def set_psi(self, psi):
self.psi = psi
def get_base_psi(self,):
return self.base_psi
def set_base_psi(self, base_psi):
self.base_psi = base_psi
def update_base_psi(self,):
new_psi = deepcopy(self.get_psi())
self.set_base_psi(new_psi)
def roll_back_to_base_psi(self,):
old_psi = deepcopy(self.get_base_psi())
self.set_psi(old_psi)
def get_unary_dict_keys(self,):
return list(self.un_dict.keys())
def get_binary_dict_keys(self,):
return list(self.bin_dict.keys())
def eval_psi(self, x):
"""
out_nodes - indexes of nodes which are outputs of nop. [list]
x - list of state components
"""
x = {self.get_free_input_node() + ind: val for ind, val in enumerate(x)}
xq = {**self.q, **x} # merge two dicts without altering the originals
d = {} # node: value
psi = self.get_psi()
def apply_unary(unary_func, unary_index):
try: return unary_func(xq[unary_index]) # try to apply unary to q or x
except: return unary_func(d[unary_index]) # apply unary to a dictionary with that node otherwise
for cell in psi:
binary_index = cell[-1][1] # binary operation index
binary_func = self.bin_dict[binary_index] # binary function object
d[cell[-1][0]] = binary_func([apply_unary(self.un_dict[i[1]], i[0]) for i in cell[:-1]])
nop_outputs = tuple([d[node] for node in self.output_nodes])
return nop_outputs
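# Hedged demo (not part of the original module): builds a tiny network operator
# with one parameter node (q0), two input nodes and one output node, then
# evaluates psi. All names, operations and values here are illustrative only.
def _network_operator_demo():
    unaries = [lambda v: v, lambda v: v * v]        # 0: identity, 1: square
    binaries = [lambda vals: sum(vals)]             # 0: summation over a cell
    nop = NetworkOperator(unaries, binaries, input_nodes=[1, 2], output_nodes=[3])
    nop.set_q([1.0])                                # node 0 holds q0 = 1.0
    # One cell: node 3 = q0 + x0**2 + x1**2; the last tuple of a cell is
    # (result node, binary op index), the others are (source node, unary op index).
    nop.set_psi([[(0, 0), (1, 1), (2, 1), (3, 0)]])
    return nop.eval_psi([2.0, 3.0])                 # -> (14.0,)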
class StructureGenetics(object):
"""
"""
def __init__(self, nop, model):
self.nop = nop
self.model = model
self.model.set_control_function(nop.eval_psi)
self.base_estimation = None
self.qmin = None
self.qmax = None
def generate_variations_population(self, individuals, variations_per_individual):
"""
Generate population.
(int, int) -> np.array of integers (shape: [h x m x 4])
"""
population = {}
while len(population) < individuals:
individual = [self.generate_variation(mutate_param=False) for i in range(variations_per_individual)]
functional = self.estimate_variation(individual)
if functional < 1e+3: population[functional] = individual
return population
def estimate_object_function(self,):
"""
Evaluates function self.estimator with q as an incoming parameter
(vector) -> real
"""
functional = self.model.simulate()
self.model.reset()
return functional
def estimate_variation(self, variation_matrix):
for variation in variation_matrix:
if variation[0] != 3: # first we apply all kinds of variations except of the delete
self.apply_variation(variation)
for variation in variation_matrix:
if variation[0] == 3: # then we apply delete variation
self.apply_variation(variation)
a = self.nop.get_psi()
b = self.nop.get_base_psi()
c = self.nop.get_q()
d = self.nop.get_base_q()
if a == b and c == d: est = self.base_estimation
else: est = self.estimate_object_function()
self.nop.roll_back_to_base_psi()
self.nop.roll_back_to_base_q()
return est
### function to insert into parametric optimization ###
def estimate_parameters(self, q):
self.nop.set_q(q)
functional = self.estimate_object_function()
return functional
#######################################################
def get_best_individual(self, population, ksearch=None):#, worst=False, ksearch=None):
"""
Return best or worst individual:
1) if ksearch != None and worst==False: return best individual
from ksearch random sample without replacement.
2) if ksearch == None and worst==True: return index of the worst
individual from the whole population.
(2d array of real, bool, int) -> array of real OR int
"""
population_estimates = np.array(list(population.keys()))
if ksearch:# and not worst:
try:
subpopulation_estimates = population_estimates[np.random.choice(population_estimates.shape[0], ksearch, replace=False)]
individual_estimate = subpopulation_estimates.min()
return (population[individual_estimate], individual_estimate)
except ValueError as e: print('Wrong type for ksearch: {0}'.format(e))
else:
best_estimate = population_estimates.min()
return (population[best_estimate], best_estimate)
def generate_variation(self, mutate_param=False):
psi = self.nop.get_base_psi()
var_num = random.randint(0,4)
#var_num = np.random.choice(5, p=[0.1, 0.1, 0.1, 0.1, 0.6])
sublist_index = random.randint(0, len(psi) - 1) # operand column index
un_keys_list = self.nop.get_unary_dict_keys()
bin_keys_list = self.nop.get_binary_dict_keys()
if var_num == 4 or mutate_param: # nop must have at least one parameter
param_index = random.randrange(0, self.nop.get_free_input_node())
new_value = random.uniform(self.qmin, self.qmax)
if not mutate_param: return [4, param_index, new_value, None]
else: return [4, mutate_param, new_value, None]
elif var_num == 0: # change binary operation
bin_keys_list = self.nop.get_binary_dict_keys()
new_bin_op = random.choice(bin_keys_list)
c = random.randint(0, max(un_keys_list[-1], bin_keys_list[-1]))
return [0, sublist_index, c, new_bin_op]
elif var_num == 1: # change unary operation
un_keys_list = self.nop.get_unary_dict_keys()
l = len(psi[sublist_index])
unary_cell = random.randint(0, l - 2) # except binary node
new_un_op = random.choice(un_keys_list)
return [1, sublist_index, unary_cell, new_un_op]
elif var_num == 2: # add unary operation
new_un_op = random.choice(un_keys_list)
if sublist_index == 0:
node = random.choice(self.nop.get_input_nodes())
else:
node = random.randint(0, psi[sublist_index-1][-1][0])
return [2, sublist_index, node, new_un_op]
elif var_num == 3: # delete unary operation
a = random.randrange(0, len(psi))
b = random.randrange(0, len(psi[a]))
c = random.randint(0, max(un_keys_list[-1], bin_keys_list[-1]))
index_to_start_from_delete = None
exclude = []
inputs = self.nop.get_input_nodes()
for i in inputs:
for ind, val in enumerate(psi):
for j, v in enumerate(val):
if v[0] == i:
exclude.append((ind, j))
break
break
continue
left_bound = max(exclude, key=itemgetter(0)) # (sublist_index, tuple_index)
sublist_index = random.randint(left_bound[0], len(psi)-1)
l = len(psi[sublist_index])
if l > 3: # if that column has more than one operand
if sublist_index == left_bound[0]:
sample_indices = [j for j, v in enumerate(psi[sublist_index][:-1]) if j != left_bound[1]]
if sample_indices:
cell_to_del = random.choice(sample_indices)
else:
return [3, a, b, c]
else: cell_to_del = random.randint(0, l - 2) # choose random index of the cell, except the last(binary cell)
node_to_del = psi[sublist_index][cell_to_del][0] # operand row index
nodes = [list(map(itemgetter(0), sublist[:-1])) for sublist in psi] # all unary nodes (list of lists)
if sum(x.count(node_to_del) for x in nodes) > 1: return [3, sublist_index, cell_to_del, c] # if more than one occurence
else: return [3, a, b, c] # lost graph connectivity
else: return [3, a, b, c] # lost graph connectivity
def apply_variation(self, variation):
loc_psi = self.nop.get_psi()
sublist_index = variation[1]
if variation[0] == 0: # change binary
new_bin_op = variation[3]
if new_bin_op > len(self.nop.get_binary_dict_keys()) - 1: return None
node = loc_psi[sublist_index][-1][0]
loc_psi[sublist_index][-1] = (node, new_bin_op)
elif variation[0] == 1: # change unary
cell = variation[2]
new_un_op = variation[3]
if cell >= len(loc_psi[sublist_index]) - 1: return None
elif new_un_op > len(self.nop.get_unary_dict_keys()) - 1: return None
node = loc_psi[sublist_index][cell][0]
loc_psi[sublist_index][cell] = (node, new_un_op)
elif variation[0] == 2: # add unary
node = variation[2]
new_un_op = variation[3]
if new_un_op > len(self.nop.get_unary_dict_keys()) - 1: return None
new_cell = (node, new_un_op)
_ = loc_psi[sublist_index].pop()
loc_psi[sublist_index].append(new_cell)
loc_psi[sublist_index].append(_)
elif variation[0] == 3: # delete unary
node_to_del = variation[2]
if len(loc_psi[sublist_index]) < 3: return None
elif node_to_del >= len(loc_psi[sublist_index]) - 1: return None
else:
for ind, sublist in enumerate(loc_psi[:sublist_index]):
if sublist[-1][0] == node_to_del:
nodes = [list(map(itemgetter(0), sublist[:-1])) for sublist in loc_psi[ind + 1:]]
break
else:
nodes = [list(map(itemgetter(0), sublist[:-1])) for sublist in loc_psi]
if sum(x.count(node_to_del) for x in nodes) > 1:
del loc_psi[sublist_index][node_to_del]
else:
return None
elif variation[0] == 4: # change parameter
param_index = variation[1]
new_value = variation[2]
self.nop.variate_parameters(param_index, new_value)
def cross(self, population, ksearch, var_num, children_num=8):
best_individual, best_value = self.get_best_individual(population)
parent1, parent1_est = self.get_best_individual(population, ksearch=ksearch)
parent2, parent2_est = self.get_best_individual(population, ksearch=ksearch)
if np.max([best_value/parent1_est, best_value/parent2_est]) > np.random.uniform():
param_len = len(self.nop.get_q())
all_variations = np.vstack((parent1, parent2))
new_vars_len = round(0.38 * all_variations.shape[0])
_ = [self.generate_variation(mutate_param=i%param_len) for i in range(new_vars_len)]
_ = np.reshape(_, (-1, 4))
all_variations = np.vstack((all_variations, _))
sex = | |
IOError as e:
val = value.Str('<I/O error: %s>' % pyutil.strerror_IO(e))
except KeyboardInterrupt:
val = value.Str('<Ctrl-C>')
finally:
self.mem.PopStatusFrame()
return val
def EvalRhsWord(self, UP_w):
# type: (word_t) -> value_t
"""Used for RHS of assignment. There is no splitting.
"""
if UP_w.tag_() == word_e.Empty:
return value.Str('')
assert UP_w.tag_() == word_e.Compound, UP_w
w = cast(compound_word, UP_w)
if len(w.parts) == 1:
part0 = w.parts[0]
UP_part0 = part0
tag = part0.tag_()
# Special case for a=(1 2). ShArrayLiteral won't appear in words that
# don't look like assignments.
if tag == word_part_e.ShArrayLiteral:
part0 = cast(sh_array_literal, UP_part0)
array_words = part0.words
words = braces.BraceExpandWords(array_words)
strs = self.EvalWordSequence(words)
#log('ARRAY LITERAL EVALUATED TO -> %s', strs)
return value.MaybeStrArray(strs)
if tag == word_part_e.AssocArrayLiteral:
part0 = cast(word_part__AssocArrayLiteral, UP_part0)
d = {} # type: Dict[str, str]
n = len(part0.pairs)
i = 0
while i < n:
k = self.EvalWordToString(part0.pairs[i])
v = self.EvalWordToString(part0.pairs[i+1])
d[k.s] = v.s
i += 2
return value.AssocArray(d)
# If RHS doesn't look like a=( ... ), then it must be a string.
return self.EvalWordToString(w)
def _EvalWordFrame(self, frame, argv):
# type: (List[Tuple[str, bool, bool]], List[str]) -> None
all_empty = True
all_quoted = True
any_quoted = False
#log('--- frame %s', frame)
for s, quoted, _ in frame:
if len(s):
all_empty = False
if quoted:
any_quoted = True
else:
all_quoted = False
# Elision of ${empty}${empty} but not $empty"$empty" or $empty""
if all_empty and not any_quoted:
return
# If every frag is quoted, e.g. "$a$b" or any part in "${a[@]}"x, then
# don't do word splitting or globbing.
if all_quoted:
tmp = [s for s, _, _ in frame]
a = ''.join(tmp)
argv.append(a)
return
will_glob = not self.exec_opts.noglob()
# Array of strings, some of which are BOTH IFS-escaped and GLOB escaped!
frags = [] # type: List[str]
for frag, quoted, do_split in frame:
if will_glob and quoted:
frag = glob_.GlobEscape(frag)
else:
# If we have a literal \, then we turn it into \\\\.
# Splitting takes \\\\ -> \\
# Globbing takes \\ to \ if it doesn't match
frag = _BackslashEscape(frag)
if do_split:
frag = _BackslashEscape(frag)
else:
frag = self.splitter.Escape(frag)
frags.append(frag)
flat = ''.join(frags)
#log('flat: %r', flat)
args = self.splitter.SplitForWordEval(flat)
# space=' '; argv $space"". We have a quoted part, but we CANNOT elide.
# Add it back and don't bother globbing.
if not args and any_quoted:
argv.append('')
return
#log('split args: %r', args)
for a in args:
self.globber.Expand(a, argv)
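  # Illustrative note (hedged, not from the original source): a "frame" here is
  # a list of (fragment, quoted, do_split) tuples produced by _MakeWordFrames.
  # For example, with splitting and globbing enabled, a frame such as
  #   [('a b', False, True), ('c', True, False)]
  # is escaped fragment-by-fragment, joined to 'a bc', split on the unquoted
  # space, and each resulting arg is passed to the globber, so argv gains
  # the entries ['a', 'bc'].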
def _EvalWordToArgv(self, w):
# type: (compound_word) -> List[str]
"""Helper for _EvalAssignBuiltin.
Splitting and globbing are disabled for assignment builtins.
Example: declare -"${a[@]}" b=(1 2)
where a is [x b=a d=a]
"""
part_vals = [] # type: List[part_value_t]
self._EvalWordToParts(w, False, part_vals) # not double quoted
frames = _MakeWordFrames(part_vals)
argv = [] # type: List[str]
for frame in frames:
if len(frame): # empty array gives empty frame!
tmp = [s for (s, _, _) in frame]
argv.append(''.join(tmp)) # no split or glob
#log('argv: %s', argv)
return argv
def _EvalAssignBuiltin(self, builtin_id, arg0, words):
# type: (builtin_t, str, List[compound_word]) -> cmd_value__Assign
"""
Handles both static and dynamic assignment, e.g.
x='foo=bar'
local a=(1 2) $x
"""
# Grammar:
#
# ('builtin' | 'command')* keyword flag* pair*
# flag = [-+].*
#
# There is also command -p, but we haven't implemented it. Maybe just punt
# on it. Punted on 'builtin' and 'command' for now too.
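# Illustrative sketch of the two paths below (the variable names are hypothetical):
#   declare -r x=1 $dyn
#     '-r'  -> collected as a flag (with its span id)
#     'x=1' -> detected statically via word_.DetectShAssignment
#     '$dyn' (say it expands to 'y=2') -> evaluated, then split by _SplitAssignArg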
eval_to_pairs = True # except for -f and -F
started_pairs = False
flags = [arg0] # initial flags like -p, and -f -F name1 name2
flag_spids = [word_.LeftMostSpanForWord(words[0])]
assign_args = [] # type: List[assign_arg]
n = len(words)
for i in xrange(1, n): # skip first word
w = words[i]
word_spid = word_.LeftMostSpanForWord(w)
if word_.IsVarLike(w):
started_pairs = True # Everything from now on is an assign_pair
if started_pairs:
left_token, close_token, part_offset = word_.DetectShAssignment(w)
if left_token: # Detected statically
if left_token.id != Id.Lit_VarLike:
# (not guaranteed since started_pairs is set twice)
e_die('LHS array not allowed in assignment builtin', word=w)
tok_val = left_token.val
if tok_val[-2] == '+':
e_die('+= not allowed in assignment builtin', word=w)
var_name = tok_val[:-1]
if part_offset == len(w.parts):
rhs_word = word.Empty() # type: word_t
else:
rhs_word = compound_word(w.parts[part_offset:])
# tilde detection only happens on static assignments!
tmp = word_.TildeDetect(rhs_word)
if tmp:
rhs_word = tmp
right = self.EvalRhsWord(rhs_word)
arg2 = assign_arg(var_name, right, word_spid)
assign_args.append(arg2)
else: # e.g. export $dynamic
argv = self._EvalWordToArgv(w)
for arg in argv:
left, right = _SplitAssignArg(arg, w)
arg2 = assign_arg(left, right, word_spid)
assign_args.append(arg2)
else:
argv = self._EvalWordToArgv(w)
for arg in argv:
if arg.startswith('-') or arg.startswith('+'): # e.g. declare -r +r
flags.append(arg)
flag_spids.append(word_spid)
# Shortcut that relies on -f and -F always meaning "function" for
# all assignment builtins
if 'f' in arg or 'F' in arg:
eval_to_pairs = False
else: # e.g. export $dynamic
if eval_to_pairs:
left, right = _SplitAssignArg(arg, w)
arg2 = assign_arg(left, right, word_spid)
assign_args.append(arg2)
started_pairs = True
else:
flags.append(arg)
return cmd_value.Assign(builtin_id, flags, flag_spids, assign_args)
def StaticEvalWordSequence2(self, words, allow_assign):
# type: (List[compound_word], bool) -> cmd_value_t
"""Static word evaluation for Oil."""
#log('W %s', words)
strs = [] # type: List[str]
spids = [] # type: List[int]
n = 0
for i, w in enumerate(words):
word_spid = word_.LeftMostSpanForWord(w)
# No globbing in the first arg! That seems like a feature, not a bug.
if i == 0:
strs0 = self._EvalWordToArgv(w) # respects strict-array
if len(strs0) == 1:
arg0 = strs0[0]
builtin_id = consts.LookupAssignBuiltin(arg0)
if builtin_id != consts.NO_INDEX:
# Same logic as legacy word eval, with no splitting
return self._EvalAssignBuiltin(builtin_id, arg0, words)
strs.extend(strs0)
for _ in strs0:
spids.append(word_spid)
continue
if glob_.LooksLikeStaticGlob(w):
val = self.EvalWordToString(w) # respects strict-array
num_appended = self.globber.Expand(val.s, strs)
for _ in xrange(num_appended):
spids.append(word_spid)
continue
part_vals = [] # type: List[part_value_t]
self._EvalWordToParts(w, False, part_vals) # not double quoted
if 0:
log('')
log('Static: part_vals after _EvalWordToParts:')
for entry in part_vals:
log(' %s', entry)
# Still need to process
frames = _MakeWordFrames(part_vals)
if 0:
log('')
log('Static: frames after _MakeWordFrames:')
for entry in frames:
log(' %s', entry)
# We will still allow x"${a[@]}"x, though it's deprecated by @a, which
# disallows such expressions at parse time.
for frame in frames:
if len(frame): # empty array gives empty frame!
tmp = [s for (s, _, _) in frame]
strs.append(''.join(tmp)) # no split or glob
spids.append(word_spid)
return cmd_value.Argv(strs, spids, None)
def EvalWordSequence2(self, words, allow_assign=False):
# type: (List[compound_word], bool) -> cmd_value_t
"""Turns a list of Words into a list of strings.
Unlike the EvalWord*() methods, it does globbing.
Args:
words: list of Word instances
Returns:
argv: list of string arguments, or None if there was an eval error
"""
if self.exec_opts.simple_word_eval():
return self.StaticEvalWordSequence2(words, allow_assign)
# Parse time:
# 1. brace expansion. TODO: Do at parse time.
# 2. Tilde detection. DONE at parse time. Only if Id.Lit_Tilde is the
# first WordPart.
#
# Run time:
# 3. tilde sub, var sub, command sub, arith sub. These are all
# "concurrent" on WordParts. (optional process sub with <() )
# 4. word splitting. Can turn this off with a shell option? Definitely
# off for oil.
# 5. globbing -- several exec_opts affect this: nullglob, safeglob, etc.
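# Rough illustration of steps 3-5 on a single word (assuming dir='my docs'):
#   "$dir"/*.py
#     -> var sub produces a quoted fragment 'my docs' and an unquoted '/*.py'
#     -> word splitting leaves the quoted fragment intact
#     -> globbing expands the trailing /*.py against the filesystem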
#log('W %s', words)
strs = [] # type: List[str]
spids = [] # type: List[int]
n = 0
for i, w in enumerate(words):
part_vals = [] # type: List[part_value_t]
self._EvalWordToParts(w, False, part_vals) # not double quoted
# DYNAMICALLY detect if we're going to run an assignment builtin, and
# change the rest of the evaluation algorithm if so.
#
# We want to allow:
# e=export
# $e foo=bar
#
# But we don't want to evaluate the first word twice in the case of:
# $(some-command) --flag
if allow_assign and i == 0 and len(part_vals) == 1:
val0 = part_vals[0]
UP_val0 = val0
if val0.tag_() == part_value_e.String:
val0 = cast(part_value__String, UP_val0)
if not val0.quoted:
builtin_id = consts.LookupAssignBuiltin(val0.s)
if builtin_id != consts.NO_INDEX:
return self._EvalAssignBuiltin(builtin_id, val0.s, words)
if 0:
log('')
log('part_vals after _EvalWordToParts:')
for entry
"""
ECCO v4 Python: read_bin_llc
This module includes utility routines for loading binary files in the
llc 13-tile native flat binary layout. This layout is the default for
MITgcm input and output for global setups using lat-lon-cap (llc) layout.
The llc layout is used for ECCO v4.
.. _ecco_v4_py Documentation :
https://github.com/ECCO-GROUP/ECCOv4-py
"""
from __future__ import division,print_function
from xmitgcm import open_mdsdataset
import xmitgcm
import numpy as np
import xarray as xr
import time
import sys
from .llc_array_conversion import llc_compact_to_tiles, \
llc_compact_to_faces, llc_faces_to_tiles, llc_faces_to_compact, \
llc_tiles_to_faces, llc_tiles_to_compact
from .read_bin_gen import load_binary_array
from .ecco_utils import make_time_bounds_and_center_times_from_ecco_dataset
def load_ecco_vars_from_mds(mds_var_dir,
mds_grid_dir,
mds_files=None,
vars_to_load = 'all',
tiles_to_load = [0,1,2,3,4,5,6,7,8,9,10,11,12],
model_time_steps_to_load = 'all',
output_freq_code = '',
meta_variable_specific=dict(),
meta_common=dict(),
mds_datatype = '>f4',
llc_method = 'bigchunks',
less_output=True):
"""
Uses xmitgcm's *open_mdsdataset* routine to load ecco variable(s) from
MITgcm's MDS binary output into xarray Dataset/DataArray objects.
The main benefit of using this routine over open_mdsdataset is that this
routine allows for
- proper centering of the *time* variable for time-averaged fields
- creation of the *time-bnds* fields in time-averaged fields
- specification of extra variable-specific and global metadata
xmitgcm.open_mdsdataset uses the model step number from the file name
(e.g., 732 from the file VAR.000000000732.data) to construct the
'time' field. For time-averaged fields, this model step
corresponds to END of the averaging period, not the time averaging mid
point. This routine fixes the 'time' field of time-averaged fields
to be the mid point of the time averaging period when the appropriate
*output_freq_code* is passed.
Parameters
----------
mds_var_dir : str
directory where the .data/.meta files are stored
mds_grid_dir : str
the directory where the model binary (.data) grid fields
are stored
mds_files : str or list or None, optional
either: a string or list of file names to load,
or None to load all files
Note : the name is everything BEFORE the time step
the mds_file name for 'var.000000000732.data' is 'var'
vars_to_load : str or list, optional, default 'all'
a string or list of the variable names to read from the mds_files
- if 'all' then all variables in the files are loaded
tiles_to_load : int or list of ints, optional, default range(13)
an int or list of ints indicating which tiles to load
model_time_steps_to_load : int or list of ints, optional, default 'all'
an int or list of ints indicating which model time steps to load
Note : the model time step indicates the time step when the file was written.
when the field is a time average, this time step shows the END of the averaging period.
output_freq_code : str, optional, default empty string
a code used to create the proper time indices on the fields after loading
('AVG' or 'SNAPSHOT') + '_' + ('DAY','WEEK','MON', or 'YEAR')
valid options :
- AVG_DAY, AVG_WEEK, AVG_MON, AVG_YEAR
- SNAPSHOT_DAY, SNAPSHOT_WEEK, SNAPSHOT_MON, SNAPSHOT_YEAR
meta_variable_specific : dict, optional, default empty dictionary
a dictionary with variable-specific metadata. used when creating
the official ECCO products
meta_common : dict, optional, default empty dictionary
a dictionary with globally-valid metadata for the ECCO fields.
useful when creating the official ECCO netcdf fields
mds_datatype : string, optional, default '>f4'
code indicating what type of field to load if the xmitgcm cannot
determine the format from the .meta file. '>f4' means big endian
32 bit float.
llc_method : string, optional, default 'bigchunks'
refer to the xmitgcm documentation.
less_output : logical, optional
if True (default), omit additional print statements
Returns
-------
ecco_dataset : xarray Dataset
"""
# range object is different between python 2 and 3
if sys.version_info[0] >= 3 and isinstance(tiles_to_load, range):
tiles_to_load = list(tiles_to_load)
#ECCO v4 r3 starts 1992/1/1 12:00:00
ecco_v4_start_year = 1992
ecco_v4_start_mon = 1
ecco_v4_start_day = 1
ecco_v4_start_hour = 12
ecco_v4_start_min = 0
ecco_v4_start_sec = 0
# ECCO v4 r3 has 1 hour (3600 s) time steps
delta_t = 3600
# define reference date for xmitgcm
ref_date = str(ecco_v4_start_year) + '-' + str(ecco_v4_start_mon) + '-' + \
str(ecco_v4_start_day) + ' ' + str(ecco_v4_start_hour) + ':' + \
str(ecco_v4_start_min) + ':' + str(ecco_v4_start_sec)
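# the resulting string is '1992-1-1 12:0:0'; it is passed to open_mdsdataset as ref_date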
if model_time_steps_to_load == 'all':
if not less_output:
print ('loading all model time steps')
ecco_dataset = open_mdsdataset(data_dir = mds_var_dir,
grid_dir = mds_grid_dir,
read_grid = True,
prefix = mds_files,
geometry = 'llc',
iters = 'all',
ref_date = ref_date,
delta_t = delta_t,
default_dtype = np.dtype(mds_datatype),
grid_vars_to_coords=True,
llc_method = llc_method)
else:
if not less_output:
print ('loading subset of model time steps')
if isinstance(model_time_steps_to_load, int):
model_time_steps_to_load = [model_time_steps_to_load]
if isinstance(model_time_steps_to_load, list):
ecco_dataset = open_mdsdataset(data_dir = mds_var_dir,
grid_dir = mds_grid_dir,
read_grid = True,
prefix = mds_files,
geometry = 'llc',
iters = model_time_steps_to_load,
ref_date = ref_date,
delta_t = delta_t,
default_dtype = np.dtype(mds_datatype),
grid_vars_to_coords=True,
llc_method=llc_method)
else:
raise TypeError('not a valid model_time_steps_to_load. must be "all", an "int", or a list of "int"')
# replace the xmitgcm coordinate name of 'FACE' with 'TILE'
if 'face' in ecco_dataset.coords.keys():
ecco_dataset = ecco_dataset.rename({'face': 'tile'})
ecco_dataset.tile.attrs['standard_name'] = 'tile_index'
# if vars_to_load is 'all', keep all variables. otherwise,
# only keep those variables in the vars_to_load list.
vars_ignored = []
vars_loaded = []
if not isinstance(vars_to_load, list):
vars_to_load = [vars_to_load]
if not less_output:
print ('vars to load ', vars_to_load)
if 'all' not in vars_to_load:
if not less_output:
print ('loading subset of variables: ', vars_to_load)
# remove variables that are not on the vars_to_load_list
for ecco_var in ecco_dataset.keys():
if ecco_var not in vars_to_load:
vars_ignored.append(ecco_var)
ecco_dataset = ecco_dataset.drop(ecco_var)
else:
vars_loaded.append(ecco_var)
if not less_output:
print ('loaded : ', vars_loaded)
print ('ignored : ', vars_ignored)
else:
if not less_output:
print ('loaded all variables : ', ecco_dataset.keys())
# keep tiles in the 'tiles_to_load' list.
if not isinstance(tiles_to_load, list) and not isinstance(tiles_to_load,range):
tiles_to_load = [tiles_to_load]
if not less_output:
print ('subsetting tiles to ', tiles_to_load)
ecco_dataset = ecco_dataset.sel(tile = tiles_to_load)
#ecco_dataset = ecco_dataset.isel(time=0)
if not less_output:
print ('creating time bounds .... ')
if 'AVG' in output_freq_code and \
'time_bnds' not in ecco_dataset.keys():
if not less_output:
print ('avg in output freq code and time bounds not in ecco keys')
time_bnds_ds, center_times = \
make_time_bounds_and_center_times_from_ecco_dataset(ecco_dataset,\
output_freq_code)
ecco_dataset = xr.merge((ecco_dataset, time_bnds_ds))
if 'time_bnds-no-units' in meta_common:
ecco_dataset.time_bnds.attrs=meta_common['time_bnds-no-units']
ecco_dataset = ecco_dataset.set_coords('time_bnds')
if not less_output:
print ('time bounds -----')
print (time_bnds_ds)
print ('center times -----')
print (center_times)
print ('ecco dataset time values type', type(ecco_dataset.time.values))
print ('ecco dataset time_bnds type ', type(ecco_dataset.time_bnds))
print ('ecco dataset time_bnds ', ecco_dataset.time_bnds)
if isinstance(ecco_dataset.time.values, np.datetime64):
if not less_output:
print ('replacing time.values....')
ecco_dataset.time.values = center_times
elif isinstance(center_times, np.datetime64):
if not less_output:
print ('replacing time.values....')
center_times = np.array(center_times)
ecco_dataset.time.values[:] = center_times
elif isinstance(ecco_dataset.time.values, np.ndarray) and \
isinstance(center_times, np.ndarray):
if not less_output:
print ('replacing time.values....')
ecco_dataset.time.values = center_times
if 'ecco-v4-time-average-center-no-units' in meta_common:
ecco_dataset.time.attrs = \
meta_common['ecco-v4-time-average-center-no-units']
if not less_output:
print ('dataset times : ', ecco_dataset.time.values)
elif 'SNAPSHOT' in output_freq_code:
if 'ecco-v4-time-snapshot-no-units' in meta_common:
ecco_dataset.time.attrs = \
meta_common['ecco-v4-time-snapshot-no-units']
#%% DROP SOME EXTRA FIELDS THAT DO NOT NEED TO BE IN THE DATASET
if 'maskCtrlS' in ecco_dataset.coords.keys():
ecco_dataset=ecco_dataset.drop('maskCtrlS')
if 'maskCtrlW' in ecco_dataset.coords.keys():
ecco_dataset=ecco_dataset.drop('maskCtrlW')
if 'maskCtrlC' in ecco_dataset.coords.keys():
ecco_dataset=ecco_dataset.drop('maskCtrlC')
# UPDATE THE VARIABLE SPECIFIC METADATA USING THE 'META_VARSPECIFIC' DICT.
# if it exists
for ecco_var in ecco_dataset.variables.keys():
if ecco_var in meta_variable_specific.keys():
ecco_dataset[ecco_var].attrs = meta_variable_specific[ecco_var]
#%% UPDATE THE GLOBAL METADATA USING THE 'META_COMMON' DICT, if it exists
ecco_dataset.attrs = dict()
if 'ecco-v4-global' in meta_common:
ecco_dataset.attrs.update(meta_common['ecco-v4-global'])
if 'k' in ecco_dataset.dims.keys() and \
'ecco-v4-global-3D' in meta_common:
ecco_dataset.attrs.update(meta_common['ecco-v4-global-3D'])
ecco_dataset.attrs['date_created'] = time.ctime()
# give it a hug?
ecco_dataset = ecco_dataset.squeeze()
return ecco_dataset
#%%
def read_llc_to_tiles_xmitgcm(fdir, fname, llc=90, skip=0, nk=1, nl=1,
filetype = '>f4', less_output = True):
"""
Loads an MITgcm binary file in the 'tiled' format of the
lat-lon-cap (LLC) grids via xmitgcm.
Array is returned with the following dimension order:
[N_tiles, N_recs, N_z, llc, llc]
where if either N_z or N_recs =1, then
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from google.appengine._internal.antlr3 import *
from google.appengine._internal.antlr3.compat import set, frozenset
from google.appengine._internal.antlr3.tree import *
HIDDEN = BaseRecognizer.HIDDEN
DOLLAR=33
LT=7
EXPONENT=28
LSQUARE=19
ASCII_LETTER=31
OCTAL_ESC=36
FLOAT=23
NAME_START=29
EOF=-1
LPAREN=17
INDEX=5
RPAREN=18
QUOTE=26
NAME=22
ESC_SEQ=27
PLUS=13
DIGIT=25
EQ=11
NE=12
T__42=42
T__43=43
T__40=40
GE=10
T__41=41
T__46=46
T__47=47
T__44=44
T__45=45
T__48=48
T__49=49
UNICODE_ESC=35
HEX_DIGIT=34
UNDERSCORE=32
INT=20
FN=6
MINUS=14
RSQUARE=21
PHRASE=24
WS=30
T__37=37
T__38=38
T__39=39
NEG=4
GT=9
DIV=16
TIMES=15
LE=8
tokenNames = [
"<invalid>", "<EOR>", "<DOWN>", "<UP>",
"NEG", "INDEX", "FN", "LT", "LE", "GT", "GE", "EQ", "NE", "PLUS", "MINUS",
"TIMES", "DIV", "LPAREN", "RPAREN", "LSQUARE", "INT", "RSQUARE", "NAME",
"FLOAT", "PHRASE", "DIGIT", "QUOTE", "ESC_SEQ", "EXPONENT", "NAME_START",
"WS", "ASCII_LETTER", "UNDERSCORE", "DOLLAR", "HEX_DIGIT", "UNICODE_ESC",
"OCTAL_ESC", "'.'", "','", "'abs'", "'count'", "'distance'", "'geopoint'",
"'if'", "'len'", "'log'", "'max'", "'min'", "'pow'", "'snippet'"
]
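# The generated parser below accepts search-expression strings such as
#   "max(price, 100) + 2.5"  or  "distance(geopoint(34.2, -119.1), store) < 500"
# (illustrative examples only; the supported function names are listed in tokenNames above).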
class ExpressionParser(Parser):
grammarFileName = "blaze-out/host/genfiles/apphosting/api/search/genantlr/Expression.g"
antlr_version = version_str_to_tuple("3.1.1")
antlr_version_str = "3.1.1"
tokenNames = tokenNames
def __init__(self, input, state=None):
if state is None:
state = RecognizerSharedState()
Parser.__init__(self, input, state)
self.dfa6 = self.DFA6(
self, 6,
eot = self.DFA6_eot,
eof = self.DFA6_eof,
min = self.DFA6_min,
max = self.DFA6_max,
accept = self.DFA6_accept,
special = self.DFA6_special,
transition = self.DFA6_transition
)
self._adaptor = CommonTreeAdaptor()
def getTreeAdaptor(self):
return self._adaptor
def setTreeAdaptor(self, adaptor):
self._adaptor = adaptor
adaptor = property(getTreeAdaptor, setTreeAdaptor)
def mismatch(input, ttype, follow):
raise MismatchedTokenException(ttype, input)
def recoverFromMismatchedSet(input, e, follow):
raise e
class expression_return(ParserRuleReturnScope):
def __init__(self):
ParserRuleReturnScope.__init__(self)
self.tree = None
def expression(self, ):
retval = self.expression_return()
retval.start = self.input.LT(1)
root_0 = None
EOF2 = None
cmpExpr1 = None
EOF2_tree = None
try:
try:
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_cmpExpr_in_expression92)
cmpExpr1 = self.cmpExpr()
self._state.following.pop()
self._adaptor.addChild(root_0, cmpExpr1.tree)
EOF2=self.match(self.input, EOF, self.FOLLOW_EOF_in_expression94)
EOF2_tree = self._adaptor.createWithPayload(EOF2)
self._adaptor.addChild(root_0, EOF2_tree)
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException, e:
self.reportError(e)
raise e
finally:
pass
return retval
class cmpExpr_return(ParserRuleReturnScope):
def __init__(self):
ParserRuleReturnScope.__init__(self)
self.tree = None
def cmpExpr(self, ):
retval = self.cmpExpr_return()
retval.start = self.input.LT(1)
root_0 = None
addExpr3 = None
cmpOp4 = None
addExpr5 = None
try:
try:
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_addExpr_in_cmpExpr107)
addExpr3 = self.addExpr()
self._state.following.pop()
self._adaptor.addChild(root_0, addExpr3.tree)
alt1 = 2
LA1_0 = self.input.LA(1)
if ((LT <= LA1_0 <= NE)) :
alt1 = 1
if alt1 == 1:
pass
self._state.following.append(self.FOLLOW_cmpOp_in_cmpExpr110)
cmpOp4 = self.cmpOp()
self._state.following.pop()
root_0 = self._adaptor.becomeRoot(cmpOp4.tree, root_0)
self._state.following.append(self.FOLLOW_addExpr_in_cmpExpr113)
addExpr5 = self.addExpr()
self._state.following.pop()
self._adaptor.addChild(root_0, addExpr5.tree)
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException, e:
self.reportError(e)
raise e
finally:
pass
return retval
class cmpOp_return(ParserRuleReturnScope):
def __init__(self):
ParserRuleReturnScope.__init__(self)
self.tree = None
def cmpOp(self, ):
retval = self.cmpOp_return()
retval.start = self.input.LT(1)
root_0 = None
set6 = None
set6_tree = None
try:
try:
pass
root_0 = self._adaptor.nil()
set6 = self.input.LT(1)
if (LT <= self.input.LA(1) <= NE):
self.input.consume()
self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set6))
self._state.errorRecovery = False
else:
mse = MismatchedSetException(None, self.input)
raise mse
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException, e:
self.reportError(e)
raise e
finally:
pass
return retval
class addExpr_return(ParserRuleReturnScope):
def __init__(self):
ParserRuleReturnScope.__init__(self)
self.tree = None
def addExpr(self, ):
retval = self.addExpr_return()
retval.start = self.input.LT(1)
root_0 = None
multExpr7 = None
addOp8 = None
multExpr9 = None
try:
try:
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_multExpr_in_addExpr171)
multExpr7 = self.multExpr()
self._state.following.pop()
self._adaptor.addChild(root_0, multExpr7.tree)
while True:
alt2 = 2
LA2_0 = self.input.LA(1)
if ((PLUS <= LA2_0 <= MINUS)) :
alt2 = 1
if alt2 == 1:
pass
self._state.following.append(self.FOLLOW_addOp_in_addExpr174)
addOp8 = self.addOp()
self._state.following.pop()
root_0 = self._adaptor.becomeRoot(addOp8.tree, root_0)
self._state.following.append(self.FOLLOW_multExpr_in_addExpr177)
multExpr9 = self.multExpr()
self._state.following.pop()
self._adaptor.addChild(root_0, multExpr9.tree)
else:
break
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException, e:
self.reportError(e)
raise e
finally:
pass
return retval
class addOp_return(ParserRuleReturnScope):
def __init__(self):
ParserRuleReturnScope.__init__(self)
self.tree = None
def addOp(self, ):
retval = self.addOp_return()
retval.start = self.input.LT(1)
root_0 = None
set10 = None
set10_tree = None
try:
try:
pass
root_0 = self._adaptor.nil()
set10 = self.input.LT(1)
if (PLUS <= self.input.LA(1) <= MINUS):
self.input.consume()
self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set10))
self._state.errorRecovery = False
else:
mse = MismatchedSetException(None, self.input)
raise mse
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException, e:
self.reportError(e)
raise e
finally:
pass
return retval
class multExpr_return(ParserRuleReturnScope):
def __init__(self):
ParserRuleReturnScope.__init__(self)
self.tree = None
def multExpr(self, ):
retval = self.multExpr_return()
retval.start = self.input.LT(1)
root_0 = None
unary11 = None
multOp12 = None
unary13 = None
try:
try:
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_unary_in_multExpr211)
unary11 = self.unary()
self._state.following.pop()
self._adaptor.addChild(root_0, unary11.tree)
while True:
alt3 = 2
LA3_0 = self.input.LA(1)
if ((TIMES <= LA3_0 <= DIV)) :
alt3 = 1
if alt3 == 1:
pass
self._state.following.append(self.FOLLOW_multOp_in_multExpr214)
multOp12 = self.multOp()
self._state.following.pop()
root_0 = self._adaptor.becomeRoot(multOp12.tree, root_0)
self._state.following.append(self.FOLLOW_unary_in_multExpr217)
unary13 = self.unary()
self._state.following.pop()
self._adaptor.addChild(root_0, unary13.tree)
else:
break
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException, e:
self.reportError(e)
raise e
finally:
pass
return retval
class multOp_return(ParserRuleReturnScope):
def __init__(self):
ParserRuleReturnScope.__init__(self)
self.tree = None
def multOp(self, ):
retval = self.multOp_return()
retval.start = self.input.LT(1)
root_0 = None
set14 = None
set14_tree = None
try:
try:
pass
root_0 = self._adaptor.nil()
set14 = self.input.LT(1)
if (TIMES <= self.input.LA(1) <= DIV):
self.input.consume()
self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set14))
self._state.errorRecovery = False
else:
mse = MismatchedSetException(None, self.input)
raise mse
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException, e:
self.reportError(e)
raise e
finally:
pass
return retval
class unary_return(ParserRuleReturnScope):
def __init__(self):
ParserRuleReturnScope.__init__(self)
self.tree = None
def unary(self, ):
retval = self.unary_return()
retval.start = self.input.LT(1)
root_0 = None
MINUS15 = None
atom16 = None
atom17 = None
MINUS15_tree = None
stream_MINUS = RewriteRuleTokenStream(self._adaptor, "token MINUS")
stream_atom = RewriteRuleSubtreeStream(self._adaptor, "rule atom")
try:
try:
alt4 = 2
LA4_0 = self.input.LA(1)
if (LA4_0 == MINUS) :
alt4 = 1
elif (LA4_0 == LPAREN or LA4_0 == INT or (NAME <= LA4_0 <= PHRASE) or (39 <= LA4_0 <= 49)) :
alt4 = 2
else:
nvae = NoViableAltException("", 4, 0, self.input)
raise nvae
if alt4 == 1:
pass
MINUS15=self.match(self.input, MINUS, self.FOLLOW_MINUS_in_unary251)
stream_MINUS.add(MINUS15)
self._state.following.append(self.FOLLOW_atom_in_unary253)
atom16 = self.atom()
self._state.following.pop()
stream_atom.add(atom16.tree)
retval.tree = root_0
if retval is not None:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
else:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
root_0 = self._adaptor.nil()
root_1 = self._adaptor.nil()
root_1 = self._adaptor.becomeRoot(self._adaptor.create(NEG, "-"), root_1)
self._adaptor.addChild(root_1, stream_atom.nextTree())
self._adaptor.addChild(root_0, root_1)
retval.tree = root_0
elif alt4 == 2:
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_atom_in_unary268)
atom17 = self.atom()
self._state.following.pop()
self._adaptor.addChild(root_0, atom17.tree)
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException, e:
self.reportError(e)
raise e
finally:
pass
return retval
class atom_return(ParserRuleReturnScope):
def __init__(self):
ParserRuleReturnScope.__init__(self)
self.tree = None
def atom(self, ):
retval = self.atom_return()
retval.start = self.input.LT(1)
root_0 = None
LPAREN22 = None
RPAREN24 = None
var18 = None
num19 = None
str20 = None
fn21 = None
addExpr23 = None
LPAREN22_tree = None
RPAREN24_tree = None
stream_RPAREN = RewriteRuleTokenStream(self._adaptor, "token RPAREN")
stream_LPAREN = RewriteRuleTokenStream(self._adaptor, "token LPAREN")
stream_addExpr = RewriteRuleSubtreeStream(self._adaptor, "rule addExpr")
try:
try:
alt5 = 5
LA5 = self.input.LA(1)
if LA5 == NAME:
alt5 = 1
elif LA5 == INT or LA5 == FLOAT:
alt5 = 2
elif LA5 == PHRASE:
alt5 = 3
elif LA5 == 39 or LA5 == 40 or LA5 == 41 or LA5 == 42 or LA5 == 43 or LA5 == 44 or LA5 == 45 or LA5 == 46 or LA5 == 47 or LA5 == 48 or LA5 == 49:
alt5 = 4
elif LA5 == LPAREN:
alt5 = 5
else:
nvae = NoViableAltException("", 5, 0, self.input)
raise nvae
if alt5 == 1:
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_var_in_atom281)
var18 = self.var()
self._state.following.pop()
self._adaptor.addChild(root_0, var18.tree)
elif alt5 == 2:
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_num_in_atom287)
num19 = self.num()
self._state.following.pop()
self._adaptor.addChild(root_0, num19.tree)
elif alt5 == 3:
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_str_in_atom293)
str20 = self.str()
self._state.following.pop()
self._adaptor.addChild(root_0, str20.tree)
elif alt5 == 4:
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_fn_in_atom299)
fn21 = self.fn()
self._state.following.pop()
self._adaptor.addChild(root_0, fn21.tree)
elif alt5 == 5:
pass
LPAREN22=self.match(self.input, LPAREN, self.FOLLOW_LPAREN_in_atom305)
stream_LPAREN.add(LPAREN22)
self._state.following.append(self.FOLLOW_addExpr_in_atom307)
addExpr23 = self.addExpr()
self._state.following.pop()
stream_addExpr.add(addExpr23.tree)
RPAREN24=self.match(self.input, RPAREN, self.FOLLOW_RPAREN_in_atom309)
stream_RPAREN.add(RPAREN24)
retval.tree = root_0
if retval is not None:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
else:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
root_0 = self._adaptor.nil()
self._adaptor.addChild(root_0, stream_addExpr.nextTree())
retval.tree = root_0
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException, e:
#! /usr/bin/env python3
import os
import sys
import argparse
import signal
import time
from datetime import datetime, timedelta
import xml.etree.ElementTree as etree
import xml.dom.minidom as minidom
import json
import random
import re
#--import the base mapper library and variants
try:
import base_mapper
except:
print('')
print('Please export PYTHONPATH=$PYTHONPATH:<path to mapper-base project>')
print('')
sys.exit(1)
baseLibrary = base_mapper.base_library(os.path.abspath(base_mapper.__file__).replace('base_mapper.py','base_variants.json'))
if not baseLibrary.initialized:
sys.exit(1)
#----------------------------------------
def pause(question='PRESS ENTER TO CONTINUE ...'):
""" pause for debug purposes """
try: response = input(question)
except KeyboardInterrupt:
response = None
global shutDown
shutDown = True
return response
#----------------------------------------
def signal_handler(signal, frame):
print('USER INTERRUPT! Shutting down ... (please wait)')
global shutDown
shutDown = True
return
#----------------------------------------
def updateStat(cat1, cat2, example = None):
if cat1 not in statPack:
statPack[cat1] = {}
if cat2 not in statPack[cat1]:
statPack[cat1][cat2] = {}
statPack[cat1][cat2]['count'] = 0
statPack[cat1][cat2]['count'] += 1
if example:
if 'examples' not in statPack[cat1][cat2]:
statPack[cat1][cat2]['examples'] = []
if example not in statPack[cat1][cat2]['examples']:
if len(statPack[cat1][cat2]['examples']) < 5:
statPack[cat1][cat2]['examples'].append(example)
else:
randomSampleI = random.randint(2,4)
statPack[cat1][cat2]['examples'][randomSampleI] = example
return
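#--for example, updateStat('NAME_TYPE', 'PRIMARY', 'some name') builds an entry like
#-- statPack['NAME_TYPE']['PRIMARY'] = {'count': n, 'examples': [up to 5 sampled values]}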
#----------------------------------------
def getAttr (segment, tagName):
""" get an xml element text value """
try: value = segment.attrib[tagName]
except: value = None
else:
if len(value) == 0:
value = None
return value
#----------------------------------------
def getValue (segment, tagName = None):
""" get an xml element text value """
try:
if tagName:
value = segment.find(tagName).text.strip()
else:
value = segment.text.strip()
except: value = None
else:
if len(value) == 0:
value = None
return value
#----------------------------------------
def idNoteParse(notes, codeType):
#--check if enclosed in parens
notes = notes.lower().replace('.','')
groupedStrings = re.findall(r'\(.*?\)', notes)
for maybeCountry in groupedStrings:
maybeCountry = maybeCountry[1:len(maybeCountry)-1]
isoCountry = baseLibrary.isoCountryCode(maybeCountry) if codeType == 'country' else baseLibrary.isoStateCode(maybeCountry)
if isoCountry:
return isoCountry
elif ',' in maybeCountry:
countryName = maybeCountry[maybeCountry.find(',')+1:].strip()
isoCountry = baseLibrary.isoCountryCode(maybeCountry) if codeType == 'country' else baseLibrary.isoStateCode(maybeCountry)
if isoCountry:
return isoCountry
#--look for various labels
tokenList = []
if 'country of issue:' in notes:
tokenList = notes[notes.find('country of issue:')+17:].strip().split()
else: #or try the whole string
tokenList = notes.strip().split()
#--if single token (just confirm or deny)
if len(tokenList) == 1:
if tokenList[0][-1] in (',', ';', ':'):
tokenList[0] = tokenList[0][0:-1]
maybeCountry = tokenList[0]
isoCountry = baseLibrary.isoCountryCode(maybeCountry) if codeType == 'country' else baseLibrary.isoStateCode(maybeCountry)
if isoCountry:
return isoCountry
else:
return None
priorToken1 = ''
priorToken2 = ''
priorToken3 = ''
maybeCountry = ''
for currentToken in tokenList:
if currentToken[-1] in (',', ';', ':'):
currentToken = currentToken[0:-1]
maybeCountry = currentToken
isoCountry0 = baseLibrary.isoCountryCode(maybeCountry) if codeType == 'country' else baseLibrary.isoStateCode(maybeCountry)
isoCountry1 = None
isoCountry2 = None
isoCountry3 = None
if priorToken1:
maybeCountry = priorToken1 + ' ' + currentToken
isoCountry1 = baseLibrary.isoCountryCode(maybeCountry) if codeType == 'country' else baseLibrary.isoStateCode(maybeCountry)
if priorToken2:
maybeCountry = priorToken2 + ' ' + priorToken1 + ' ' + currentToken
isoCountry2 = baseLibrary.isoCountryCode(maybeCountry) if codeType == 'country' else baseLibrary.isoStateCode(maybeCountry)
if priorToken3:
maybeCountry = priorToken3 + ' ' + priorToken2 + ' ' + priorToken1 + ' ' + currentToken
isoCountry3 = baseLibrary.isoCountryCode(maybeCountry) if codeType == 'country' else baseLibrary.isoStateCode(maybeCountry)
if isoCountry0 and currentToken not in ('id', 'in','is','on','no','and'): #--careful of connecting words here!
return isoCountry0
elif isoCountry1:
return isoCountry1
elif isoCountry2:
return isoCountry2
elif isoCountry3:
return isoCountry3
priorToken3 = priorToken2
priorToken2 = priorToken1
priorToken1 = currentToken
return None
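#--illustrative (hypothetical) calls; actual return values depend on base_variants.json:
#-- idNoteParse('Passport (United Kingdom)', 'country') -> ISO code for the United Kingdom
#-- idNoteParse('country of issue: france', 'country') -> ISO code for France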
#----------------------------------------
def concatDateParts(day, month, year):
#--format is 15-mar-2010
fullDate = ''
if day:
fullDate += day + '-'
if month:
fullDate += month + '-'
if year:
fullDate += year
return fullDate
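#--e.g. concatDateParts('15', 'Mar', '2010') -> '15-Mar-2010'; missing parts are skipped,
#--so concatDateParts(None, None, '2010') -> '2010'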
#----------------------------------------
def g2Mapping(masterRecord, recordType):
#--header
jsonData = {}
jsonData['DATA_SOURCE'] = dataSource
jsonData['RECORD_ID'] = masterRecord.attrib['id']
jsonData['LAST_UPDATE'] = masterRecord.attrib['date']
jsonData['STATUS'] = getValue(masterRecord, 'ActiveStatus')
jsonData['DJ_PROFILE_ID'] = masterRecord.attrib['id']
gender = getValue(masterRecord, 'Gender')
if gender:
jsonData['GENDER'] = gender
updateStat('ATTRIBUTE', 'GENDER')
deceased = getValue(masterRecord, 'Deceased')
if deceased == 'Yes':
jsonData['DECEASED'] = deceased
updateStat('OTHER', 'DECEASED', deceased)
if extendedFormat:
profileNotes = getValue(masterRecord, 'ProfileNotes')
if profileNotes:
jsonData['PROFILE_NOTES'] = profileNotes
updateStat('ATTRIBUTE', 'PROFILE_NOTES')
#--names
# <NameType NameTypeID="1" RecordType="Person">Primary Name</NameType>
# <NameType NameTypeID="2" RecordType="Person">Also Known As</NameType>
# <NameType NameTypeID="3" RecordType="Person">Low Quality AKA</NameType>
# <NameType NameTypeID="4" RecordType="Person">Maiden Name</NameType>
# <NameType NameTypeID="5" RecordType="Person">Formerly Known As</NameType>
# <NameType NameTypeID="6" RecordType="Person">Spelling Variation</NameType>
# <NameType NameTypeID="7" RecordType="Entity">Primary Name</NameType>
# <NameType NameTypeID="8" RecordType="Entity">Also Known As</NameType>
# <NameType NameTypeID="9" RecordType="Entity">Formerly Known As</NameType>
# <NameType NameTypeID="10" RecordType="Entity">Spelling Variation</NameType>
# <NameType NameTypeID="11" RecordType="Entity">Low Quality AKA</NameType>
orgPersonNameConflict = False
thisList = []
for nameRecord in masterRecord.findall('NameDetails/Name'):
nameType = nameRecord.attrib['NameType'][0:25]
if 'PRIMARY' in nameType.upper():
nameType = 'PRIMARY'
for nameValue in nameRecord.findall('NameValue'):
nameStr = ''
name = {}
name['NAME_TYPE'] = nameType
updateStat('NAME_TYPE', nameType)
nameOrg = getValue(nameValue, 'EntityName')
if nameOrg:
if len(nameOrg.split()) > 16:
updateStat('TRUNCATIONS', 'longNameOrgCnt', nameOrg)
nameOrg = ' '.join(nameOrg.split()[:16])
name['NAME_ORG'] = nameOrg
nameStr = nameOrg
nameLast = getValue(nameValue, 'Surname')
if nameLast:
if len(nameLast.split()) > 5:
updateStat('TRUNCATIONS', 'longNameLastCnt', nameLast)
nameLast = ' '.join(nameLast.split()[:5])
name['NAME_LAST'] = nameLast
nameStr = nameLast
nameMaiden = getValue(nameValue, 'MaidenName')
if nameMaiden and not nameLast: #--either Surname or MaidenName will be populated
if len(nameMaiden.split()) > 5:
updateStat('TRUNCATIONS', 'longNameMaidenCnt', nameMaiden)
nameMaiden = ' '.join(nameMaiden.split()[:5])
name['NAME_LAST'] = nameMaiden
nameStr = nameMaiden
nameFirst = getValue(nameValue, 'FirstName')
if nameFirst:
if len(nameFirst.split()) > 5:
updateStat('TRUNCATIONS', 'longNameFirstCnt', nameFirst)
nameFirst = ' '.join(nameFirst.split()[:5])
name['NAME_FIRST'] = nameFirst
nameStr += (' '+nameFirst)
nameMiddle = getValue(nameValue, 'MiddleName')
if nameMiddle:
if len(nameMiddle.split()) > 5:
updateStat('TRUNCATIONS', 'longNameMiddleCnt', nameMiddle)
nameMiddle = ' '.join(nameMiddle.split()[:5])
name['NAME_MIDDLE'] = nameMiddle
nameStr += (' '+nameMiddle)
namePrefix = getValue(nameValue, 'TitleHonorific')
if namePrefix:
name['NAME_PREFIX'] = namePrefix
nameSuffix = getValue(nameValue, 'Suffix')
if nameSuffix:
name['NAME_SUFFIX'] = nameSuffix
thisList.append(name)
#--check for a name conflict
if (recordType == 'PERSON' and 'NAME_ORG' in name) or (recordType != 'PERSON' and 'NAME_LAST' in name):
orgPersonNameConflict = True
#--duplicate this name segment for original script version if supplied
originalScriptName = getValue(nameValue, 'OriginalScriptName')
if originalScriptName:
name = {}
updateStat('NAME_TYPE', 'OriginalScriptName')
name['NAME_TYPE'] = 'OriginalScriptName'
name['NAME_FULL'] = originalScriptName
thisList.append(name)
if thisList:
jsonData['NAMES'] = thisList
if orgPersonNameConflict:
print('warning: person and org names on record %s' % jsonData['RECORD_ID'])
#--dates
# <DateType Id="1" RecordType="Person" name="Date of Birth"/>
# <DateType Id="2" RecordType="Person" name="Deceased Date"/>
# <DateType Id="3" RecordType="Entity" name="Date of Registration"/>
thisList = []
for dateRecord in masterRecord.findall('DateDetails/Date'):
try: dateType = dateRecord.attrib['DateType']
except: #--all but the anti-corruption (SOC) feed use DateType
try: dateType = dateRecord.attrib['DateTypeId']
except:
print('bad date record!')
print(minidom.parseString(etree.tostring(dateRecord, 'utf-8')).toprettyxml(indent="\t"))
continue
if dateType == 'Date of Birth':
dateType = 'DATE_OF_BIRTH'
elif dateType == 'Deceased Date':
dateType = 'DATE_OF_DEATH'
elif dateType == 'Date of Registration':
dateType = 'REGISTRATION_DATE'
for dateValue in dateRecord.findall('DateValue'):
day = getAttr(dateValue, 'Day')
month = getAttr(dateValue, 'Month')
year = getAttr(dateValue, 'Year')
thisDate = concatDateParts(day, month, year)
if dateType == 'DATE_OF_BIRTH':
outputFormat = '%Y-%m-%d'
if not day and not month:
updateStat('DOB_DATA', 'year only', thisDate)
elif year and month and not day:
updateStat('DOB_DATA', 'year/month only', thisDate)
elif month and day and not year:
updateStat('DOB_DATA', 'month/day only', thisDate)
else:
updateStat('DOB_DATA', 'full', thisDate)
formattedDate = baseLibrary.formatDate(thisDate)
if formattedDate:
thisList.append({dateType: formattedDate})
else:
jsonData[dateType] = thisDate
updateStat('ATTRIBUTE', dateType, thisDate)
if thisList:
jsonData['DATES'] = thisList
#--addresses
thisList = []
for addrRecord in masterRecord.findall('Address'):
address = {}
addrLine = getValue(addrRecord, 'AddressLine')
if addrLine:
if len(addrLine.split()) > 16:
updateStat('TRUNCATIONS', 'longAddrLineCnt', addrLine)
addrLine = ' '.join(addrLine.split()[:16])
address['ADDR_LINE1'] = addrLine
addrCity = getValue(addrRecord, 'AddressCity')
if addrCity:
address['ADDR_CITY'] = addrCity
addrCountry = getValue(addrRecord, 'AddressCountry')
if addrCountry:
address['ADDR_COUNTRY'] = countryCodes[addrCountry] if addrCountry in countryCodes else addrCountry
isoCountry = baseLibrary.isoCountryCode(address['ADDR_COUNTRY'])
if isoCountry:
address['ADDR_COUNTRY'] = isoCountry
thisList.append(address)
updateStat('ADDRESS', 'UNTYPED')
if thisList:
jsonData['ADDRESSES'] = thisList
#--company details (address/website)
thisList1 = []
thisList2 = []
for addrRecord in masterRecord.findall('CompanyDetails'):
address = {}
address['ADDR_TYPE'] = 'BUSINESS'
addrLine = getValue(addrRecord, 'AddressLine')
if addrLine:
if len(addrLine.split()) > 16:
updateStat('TRUNCATIONS', 'longAddrLineCnt', addrLine)
addrLine = ' '.join(addrLine.split()[:16])
address['ADDR_LINE1'] = addrLine
addrCity = getValue(addrRecord, 'AddressCity')
if addrCity:
address['ADDR_CITY'] = addrCity
addrCountry = getValue(addrRecord, 'AddressCountry')
if addrCountry:
address['ADDR_COUNTRY'] = countryCodes[addrCountry] if addrCountry in countryCodes else addrCountry
isoCountry = baseLibrary.isoCountryCode(address['ADDR_COUNTRY'])
if isoCountry:
address['ADDR_COUNTRY'] = isoCountry
thisList1.append(address)
updateStat('ADDRESS', 'BUSINESS')
url = getValue(addrRecord, 'URL')
if url:
thisList2.append({'WEBSITE_ADDRESS': url})
updateStat('ATTRIBUTE', 'WEBSITE_ADDRESS')
if thisList1:
jsonData['COMPANY_ADDRESSES'] = thisList1
if thisList2:
jsonData['COMPANY_WEBSITES'] = thisList2
#--countries
thisList1 = []
for birthPlaceRecord in masterRecord.findall('BirthPlace/Place'):
birthPlace = birthPlaceRecord.attrib['name']
thisList1.append({'PLACE_OF_BIRTH': birthPlace})
updateStat('ATTRIBUTE', 'PLACE_OF_BIRTH')
for countryRecord in masterRecord.findall('CountryDetails/Country'):
countryType = countryRecord.attrib['CountryType']
if countryType == 'Citizenship':
attributeType = 'CITIZENSHIP'
else:
if countryType == 'REGISTRATION':
usageType = 'REGISTRATION'
elif countryType == 'Resident of':
usageType = 'RESIDENT'
elif countryType == 'Jurisdiction':
#!/usr/bin/python
#
# Compares vmstate information stored in JSON format, obtained from
# the -dump-vmstate QEMU command.
#
# Copyright 2014 <NAME> <<EMAIL>>
# Copyright 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
import argparse
import json
import sys
# Count the number of errors found
taint = 0
def bump_taint():
global taint
# Ensure we don't wrap around or reset to 0 -- the shell only has
# an 8-bit return value.
if taint < 255:
taint = taint + 1
def check_fields_match(name, s_field, d_field):
if s_field == d_field:
return True
# Some fields changed names between qemu versions. This list
# is used to whitelist such changes in each section / description.
changed_names = {
'apic': ['timer', 'timer_expiry'],
'e1000': ['dev', 'parent_obj'],
'ehci': ['dev', 'pcidev'],
'I440FX': ['dev', 'parent_obj'],
'ich9_ahci': ['card', 'parent_obj'],
'ich9-ahci': ['ahci', 'ich9_ahci'],
'ioh3420': ['PCIDevice', 'PCIEDevice'],
'ioh-3240-express-root-port': ['port.br.dev',
'parent_obj.parent_obj.parent_obj',
'port.br.dev.exp.aer_log',
'parent_obj.parent_obj.parent_obj.exp.aer_log'],
'cirrus_vga': ['hw_cursor_x', 'vga.hw_cursor_x',
'hw_cursor_y', 'vga.hw_cursor_y'],
'lsiscsi': ['dev', 'parent_obj'],
'mch': ['d', 'parent_obj'],
'pci_bridge': ['bridge.dev', 'parent_obj', 'bridge.dev.shpc', 'shpc'],
'pcnet': ['pci_dev', 'parent_obj'],
'PIIX3': ['pci_irq_levels', 'pci_irq_levels_vmstate'],
'piix4_pm': ['dev', 'parent_obj', 'pci0_status',
'acpi_pci_hotplug.acpi_pcihp_pci_status[0x0]',
'pm1a.sts', 'ar.pm1.evt.sts', 'pm1a.en', 'ar.pm1.evt.en',
'pm1_cnt.cnt', 'ar.pm1.cnt.cnt',
'tmr.timer', 'ar.tmr.timer',
'tmr.overflow_time', 'ar.tmr.overflow_time',
'gpe', 'ar.gpe'],
'rtl8139': ['dev', 'parent_obj'],
'qxl': ['num_surfaces', 'ssd.num_surfaces'],
'usb-ccid': ['abProtocolDataStructure', 'abProtocolDataStructure.data'],
'usb-host': ['dev', 'parent_obj'],
'usb-mouse': ['usb-ptr-queue', 'HIDPointerEventQueue'],
'usb-tablet': ['usb-ptr-queue', 'HIDPointerEventQueue'],
'vmware_vga': ['card', 'parent_obj'],
'vmware_vga_internal': ['depth', 'new_depth'],
'xhci': ['pci_dev', 'parent_obj'],
'x3130-upstream': ['PCIDevice', 'PCIEDevice'],
'xio3130-express-downstream-port': ['port.br.dev',
'parent_obj.parent_obj.parent_obj',
'port.br.dev.exp.aer_log',
'parent_obj.parent_obj.parent_obj.exp.aer_log'],
'xio3130-downstream': ['PCIDevice', 'PCIEDevice'],
'xio3130-express-upstream-port': ['br.dev', 'parent_obj.parent_obj',
'br.dev.exp.aer_log',
'parent_obj.parent_obj.exp.aer_log'],
'spapr_pci': ['dma_liobn[0]', 'mig_liobn',
'mem_win_addr', 'mig_mem_win_addr',
'mem_win_size', 'mig_mem_win_size',
'io_win_addr', 'mig_io_win_addr',
'io_win_size', 'mig_io_win_size'],
}
if not name in changed_names:
return False
if s_field in changed_names[name] and d_field in changed_names[name]:
return True
return False
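# For example, check_fields_match('e1000', 'dev', 'parent_obj') returns True because both
# names appear in the whitelist entry above; an unlisted rename falls through to False.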
def get_changed_sec_name(sec):
# Section names can change -- see commit 292b1634 for an example.
changes = {
"ICH9 LPC": "ICH9-LPC",
"e1000-82540em": "e1000",
}
for item in changes:
if item == sec:
return changes[item]
if changes[item] == sec:
return item
return ""
def exists_in_substruct(fields, item):
# Some QEMU versions moved a few fields inside a substruct. This
# kept the on-wire format the same. This function checks if
# something got shifted inside a substruct. For example, the
# change in commit 1f42d22233b4f3d1a2933ff30e8d6a6d9ee2d08f
if not "Description" in fields:
return False
if not "Fields" in fields["Description"]:
return False
substruct_fields = fields["Description"]["Fields"]
if substruct_fields == []:
return False
return check_fields_match(fields["Description"]["name"],
substruct_fields[0]["field"], item)
def check_fields(src_fields, dest_fields, desc, sec):
# This function checks for all the fields in a section. If some
# fields got embedded into a substruct, this function will also
# attempt to check inside the substruct.
d_iter = iter(dest_fields)
s_iter = iter(src_fields)
# Using these lists as stacks to store previous value of s_iter
# and d_iter, so that when time comes to exit out of a substruct,
# we can go back one level up and continue from where we left off.
s_iter_list = []
d_iter_list = []
advance_src = True
advance_dest = True
unused_count = 0
while True:
if advance_src:
try:
s_item = s_iter.next()
except StopIteration:
if s_iter_list == []:
break
s_iter = s_iter_list.pop()
continue
else:
if unused_count == 0:
# We want to avoid advancing just once -- when entering a
# dest substruct, or when exiting one.
advance_src = True
if advance_dest:
try:
d_item = d_iter.next()
except StopIteration:
if d_iter_list == []:
# We were not in a substruct
print "Section \"" + sec + "\",",
print "Description " + "\"" + desc + "\":",
print "expected field \"" + s_item["field"] + "\",",
print "while dest has no further fields"
bump_taint()
break
d_iter = d_iter_list.pop()
advance_src = False
continue
else:
if unused_count == 0:
advance_dest = True
if unused_count != 0:
if advance_dest == False:
unused_count = unused_count - s_item["size"]
if unused_count == 0:
advance_dest = True
continue
if unused_count < 0:
print "Section \"" + sec + "\",",
print "Description \"" + desc + "\":",
print "unused size mismatch near \"",
print s_item["field"] + "\""
bump_taint()
break
continue
if advance_src == False:
unused_count = unused_count - d_item["size"]
if unused_count == 0:
advance_src = True
continue
if unused_count < 0:
print "Section \"" + sec + "\",",
print "Description \"" + desc + "\":",
print "unused size mismatch near \"",
print d_item["field"] + "\""
bump_taint()
break
continue
if not check_fields_match(desc, s_item["field"], d_item["field"]):
# Some fields were put in substructs, keeping the
# on-wire format the same, but breaking static tools
# like this one.
# First, check if dest has a new substruct.
if exists_in_substruct(d_item, s_item["field"]):
# listiterators don't have a prev() function, so we
# have to store our current location, descend into the
# substruct, and ensure we come out as if nothing
# happened when the substruct is over.
#
# Essentially we're opening the substructs that got
# added which didn't change the wire format.
d_iter_list.append(d_iter)
substruct_fields = d_item["Description"]["Fields"]
d_iter = iter(substruct_fields)
advance_src = False
continue
# Next, check if src has substruct that dest removed
# (can happen in backward migration: 2.0 -> 1.5)
if exists_in_substruct(s_item, d_item["field"]):
s_iter_list.append(s_iter)
substruct_fields = s_item["Description"]["Fields"]
s_iter = iter(substruct_fields)
advance_dest = False
continue
if s_item["field"] == "unused" or d_item["field"] == "unused":
if s_item["size"] == d_item["size"]:
continue
if d_item["field"] == "unused":
advance_dest = False
unused_count = d_item["size"] - s_item["size"]
continue
if s_item["field"] == "unused":
advance_src = False
unused_count = s_item["size"] - d_item["size"]
continue
print "Section \"" + sec + "\",",
print "Description \"" + desc + "\":",
print "expected field \"" + s_item["field"] + "\",",
print "got \"" + d_item["field"] + "\"; skipping rest"
bump_taint()
break
check_version(s_item, d_item, sec, desc)
if not "Description" in s_item:
# Check size of this field only if it's not a VMSTRUCT entry
check_size(s_item, d_item, sec, desc, s_item["field"])
check_description_in_list(s_item, d_item, sec, desc)
def check_subsections(src_sub, dest_sub, desc, sec):
for s_item in src_sub:
found = False
for d_item in dest_sub:
if s_item["name"] != d_item["name"]:
continue
found = True
check_descriptions(s_item, d_item, sec)
if not found:
print "Section \"" + sec + "\", Description \"" + desc + "\":",
print "Subsection \"" + s_item["name"] + "\" not found"
bump_taint()
def check_description_in_list(s_item, d_item, sec, desc):
if not "Description" in s_item:
return
if not "Description" in d_item:
print "Section \"" + sec + "\", Description \"" + desc + "\",",
print "Field \"" + s_item["field"] + "\": missing description"
bump_taint()
return
check_descriptions(s_item["Description"], d_item["Description"], sec)
def check_descriptions(src_desc, dest_desc, sec):
check_version(src_desc, dest_desc, sec, src_desc["name"])
if not check_fields_match(sec, src_desc["name"], dest_desc["name"]):
print "Section \"" + sec + "\":",
print "Description \"" + src_desc["name"] + "\"",
print "missing, got \"" + dest_desc["name"] + "\" instead; skipping"
bump_taint()
return
for f in src_desc:
if not f in dest_desc:
print "Section \"" + sec + "\"",
print "Description \"" + src_desc["name"] + "\":",
print "Entry \"" + f + "\" missing"
bump_taint()
continue
if f == 'Fields':
check_fields(src_desc[f], dest_desc[f], src_desc["name"], sec)
if f == 'Subsections':
check_subsections(src_desc[f], dest_desc[f], src_desc["name"], sec)
def check_version(s, d, sec, desc=None):
if s["version_id"] > d["version_id"]:
print "Section \"" + sec + "\"",
if desc:
print "Description \"" + desc + "\":",
print "version error:", s["version_id"], ">", d["version_id"]
bump_taint()
if not "minimum_version_id" in d:
return
if s["version_id"] < d["minimum_version_id"]:
print "Section \"" + sec + "\"",
if desc:
print "Description \"" + desc + "\":",
print "minimum version error:", s["version_id"], "<",
print d["minimum_version_id"]
bump_taint()
def check_size(s, d, sec, desc=None, field=None):
if s["size"] != d["size"]:
print "Section \"" + sec + "\"",
if desc:
print "Description \"" + desc + "\"",
if field:
print "Field \"" + field + "\"",
print "size mismatch:", s["size"], ",", | |
"""
Test homebase.
Since some of the homebase functionality is OS specific, this module can only get 100% coverage if it is run on
all three operating systems: Windows, Linux, and macOS. Thus, coverage tools will need to merge reports from different
runs.
From a functional perspective, the only thing that changes across operating systems is the set of expected results.
As such, the strategy for testing is as follows:
1. A base class TestHomebase defines basic expectations and all of the unit tests common to all three operating
systems.
2. A class for handling virtualenv tests, TestHomebaseVirtualEnv that modifies the expected path for each
OS to expect the appropriate virtualenv base dir, and runs all tests of TestHomebase by inheriting from it.
3. The base class TestHomebase tests linux in the case where the XDG variables are not set. Another class
TestHomebaseLinuxXDG sets these variables and the expected paths and reruns the tests by inheriting from
TestHomebase.
4. A class for Windows, TestHomebaseWindows, adds tests for roaming and app_author values.
To run the tests against linux, you can use the accompanying Dockerfile.
"""
import os
import sys
import shutil
import unittest
import virtualenv
import homebase
_PY2 = sys.version_info[0] == 2
if _PY2:
def itervalues(d):
return d.itervalues()
else:
def itervalues(d):
return iter(d.values())
class TestHomebase(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.app_name = "test_app"
self.app_author = 'for_windows_only'
# we can't just use sys.platform because on linux it could be either linux2 or just linux
# (https://docs.python.org/2/library/sys.html#sys.platform)
# If we're masking linux, might as well mask darwin with mac os.
if sys.platform.startswith('linux'):
self.platform = 'linux'
elif sys.platform == 'darwin':
self.platform = 'mac_os'
else:
self.platform = 'windows'
self.base_paths = self._expected_base_paths()
def _expected_base_paths(self):
mac_os_app_support = os.path.expanduser('~/Library/Application Support/')
mac_os_site_app_support = '/Library/Application Support'
base_paths = {
'mac_os': {
'user_data': mac_os_app_support,
'user_config': mac_os_app_support,
'user_state': mac_os_app_support,
'user_cache': os.path.expanduser('~/Library/Caches'),
'user_log': os.path.expanduser('~/Library/Logs'),
'site_data': [mac_os_site_app_support],
'site_config': [mac_os_site_app_support]
},
'linux': {
'user_data': os.path.expanduser("~/.local/share"),
'user_config': os.path.expanduser('~/.config'),
'user_state': os.path.expanduser('~/.local/state'),
'user_cache': os.path.expanduser('~/.cache'),
'user_log': os.path.expanduser('~/.log'),
'site_data': ['/usr/local/share', '/usr/share'],
'site_config': ['/etc/xdg']
},
'windows': self._windows_base_paths()
}
# add virtualenv expectations. When there is no actual virtualenv, we expect the use_virtualenv parameter
# to do nothing.
for paths in itervalues(base_paths):
paths['user_data_venv'] = paths['user_data']
paths['user_config_venv'] = paths['user_config']
paths['user_state_venv'] = paths['user_state']
paths['user_cache_venv'] = paths['user_cache']
paths['user_log_venv'] = paths['user_log']
return base_paths
def _windows_base_paths(self):
windows_username = os.getenv('username')
base_paths = {
'user_data': '',
'user_config': '',
'user_state': '',
'user_cache': '',
'user_log': '',
'site_data': '',
'site_config': ''
}
if windows_username is not None:
windows_absolute_path = os.path.join('C:', os.sep)
windows_base_path = os.path.join(windows_absolute_path, 'Users', windows_username, 'AppData', 'Local')
# add windows expectations.
base_paths['user_data'] = os.path.join(windows_base_path, self.app_author)
base_paths['user_config'] = os.path.join(windows_base_path, self.app_author)
base_paths['user_state'] = os.path.join(windows_base_path, self.app_author)
base_paths['user_cache'] = os.path.join(windows_base_path, self.app_author, 'Caches')
base_paths['user_log'] = os.path.join(windows_base_path, self.app_author, 'Logs')
base_paths['site_data'] = [os.path.join(windows_absolute_path, 'ProgramData', self.app_author)]
base_paths['site_config'] = [os.path.join(windows_absolute_path, 'ProgramData', self.app_author)]
return base_paths
@staticmethod
def _setup_linux_xdg_vars():
test_dir = os.path.abspath(os.path.dirname(__file__))
os.environ['XDG_DATA_HOME'] = os.path.join(test_dir, '.local/share')
os.environ['XDG_CONFIG_HOME'] = os.path.join(test_dir, '.config')
os.environ['XDG_STATE_HOME'] = os.path.join(test_dir, '.local/state')
os.environ['XDG_CACHE_HOME'] = os.path.join(test_dir, '.cache')
os.environ['XDG_DATA_DIRS'] = os.pathsep.join([os.path.join(test_dir, '.site/data'),
os.path.join(test_dir, '.site/data2')])
os.environ['XDG_CONFIG_DIRS'] = os.pathsep.join([os.path.join(test_dir, '.site/config'),
os.path.join(test_dir, '.site/config2')])
@staticmethod
def _clear_linux_xdg_vars():
var_names = ['XDG_DATA_HOME', 'XDG_CONFIG_HOME', 'XDG_STATE_HOME', 'XDG_CACHE_HOME', 'XDG_DATA_DIRS',
'XDG_CONFIG_DIRS']
for var in var_names:
if var in os.environ:
del os.environ[var]
@classmethod
def setUpClass(cls):
cls._clear_linux_xdg_vars()
#####################################################
# user_data_dir.
#####################################################
def test_user_data_no_version_no_venv_no_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_data'], self.app_name)
self.assertEqual(expected,
homebase.user_data_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=False, create=False))
def test_user_data_no_version_no_venv_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_data'], self.app_name)
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertEqual(expected, homebase.user_data_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=False, create=True))
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_data_no_version_venv_no_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_data_venv'], self.app_name)
self.assertEqual(expected, homebase.user_data_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=True, create=False))
def test_user_data_no_version_venv_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_data_venv'], self.app_name)
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertEqual(expected, homebase.user_data_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=True, create=True))
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_data_version_no_venv_no_create(self):
version = "1.0"
expected = os.path.join(self.base_paths[self.platform]['user_data'], '{}_{}'.format(self.app_name, version))
self.assertEqual(expected,
homebase.user_data_dir(self.app_name, app_author=self.app_author, version=version,
use_virtualenv=False, create=False))
def test_user_data_version_no_venv_create(self):
version = "1.0"
expected = os.path.join(self.base_paths[self.platform]['user_data'], '{}_{}'.format(self.app_name, version))
if os.path.exists(expected):
shutil.rmtree(expected)
result = homebase.user_data_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=False,
create=True)
self.assertEqual(expected, result)
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_data_version_venv_no_create(self):
version = '1.0'
expected = os.path.join(self.base_paths[self.platform]['user_data_venv'],
'{}_{}'.format(self.app_name, version))
result = homebase.user_data_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=True,
create=False)
self.assertEqual(expected, result)
def test_user_data_version_venv_create(self):
version = '1.0'
expected = os.path.join(self.base_paths[self.platform]['user_data_venv'],
'{}_{}'.format(self.app_name, version))
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertFalse(os.path.exists(expected))
result = homebase.user_data_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=True,
create=True)
self.assertEqual(expected, result)
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
#####################################################
# user_cache_dir.
#####################################################
def test_user_cache_no_version_no_venv_no_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_cache'], self.app_name)
self.assertEqual(expected,
homebase.user_cache_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=False, create=False))
def test_user_cache_no_version_no_venv_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_cache'], self.app_name)
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertEqual(expected,
homebase.user_cache_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=False, create=True))
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_cache_no_version_venv_no_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_cache_venv'], self.app_name)
self.assertEqual(expected,
homebase.user_cache_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=True, create=False))
def test_user_cache_no_version_venv_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_cache_venv'], self.app_name)
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertEqual(expected, homebase.user_cache_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=True, create=True))
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_cache_version_no_venv_no_create(self):
version = "1.0"
expected = os.path.join(self.base_paths[self.platform]['user_cache'], '{}_{}'.format(self.app_name, version))
self.assertEqual(expected,
homebase.user_cache_dir(self.app_name, app_author=self.app_author, version=version,
use_virtualenv=False, create=False))
def test_user_cache_version_no_venv_create(self):
version = "1.0"
expected = os.path.join(self.base_paths[self.platform]['user_cache'], '{}_{}'.format(self.app_name, version))
if os.path.exists(expected):
shutil.rmtree(expected)
result = homebase.user_cache_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=False,
create=True)
self.assertEqual(expected, result)
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_cache_version_venv_no_create(self):
version = '1.0'
expected = os.path.join(self.base_paths[self.platform]['user_cache_venv'],
'{}_{}'.format(self.app_name, version))
result = homebase.user_cache_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=True,
create=False)
self.assertEqual(expected, result)
def test_user_cache_version_venv_create(self):
version = '1.0'
expected = os.path.join(self.base_paths[self.platform]['user_cache_venv'],
'{}_{}'.format(self.app_name, version))
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertFalse(os.path.exists(expected))
result = homebase.user_cache_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=True,
create=True)
self.assertEqual(expected, result)
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
#####################################################
# user_config_dir
#####################################################
def test_user_config_no_version_no_venv_no_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_config'], self.app_name)
self.assertEqual(expected,
homebase.user_config_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=False, create=False))
def test_user_config_no_version_no_venv_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_config'], self.app_name)
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertEqual(expected,
homebase.user_config_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=False, create=True))
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_config_no_version_venv_no_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_config_venv'], self.app_name)
self.assertEqual(expected,
homebase.user_config_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=True, create=False))
def test_user_config_no_version_venv_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_config_venv'], self.app_name)
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertEqual(expected,
homebase.user_config_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=True, create=True))
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_config_version_no_venv_no_create(self):
version = "1.0"
expected = os.path.join(self.base_paths[self.platform]['user_config'], '{}_{}'.format(self.app_name, version))
self.assertEqual(expected,
homebase.user_config_dir(self.app_name, app_author=self.app_author, version=version,
use_virtualenv=False, create=False))
def test_user_config_version_no_venv_create(self):
version = "1.0"
expected = os.path.join(self.base_paths[self.platform]['user_config'], '{}_{}'.format(self.app_name, version))
if os.path.exists(expected):
shutil.rmtree(expected)
result = homebase.user_config_dir(self.app_name, app_author=self.app_author, version=version,
use_virtualenv=False, create=True)
self.assertEqual(expected, result)
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_config_version_venv_no_create(self):
version = '1.0'
expected = os.path.join(self.base_paths[self.platform]['user_config_venv'],
'{}_{}'.format(self.app_name, version))
result = homebase.user_config_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=True,
create=False)
self.assertEqual(expected, result)
def test_user_config_version_venv_create(self):
version = '1.0'
expected = os.path.join(self.base_paths[self.platform]['user_config_venv'],
'{}_{}'.format(self.app_name, version))
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertFalse(os.path.exists(expected))
result = homebase.user_config_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=True,
create=True)
self.assertEqual(expected, result)
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
#####################################################
# user_state_dir
#####################################################
def test_user_state_no_version_no_venv_no_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_state'], self.app_name)
self.assertEqual(expected,
homebase.user_state_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=False, create=False))
def test_user_state_no_version_no_venv_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_state'], self.app_name)
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertEqual(expected,
homebase.user_state_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=False, create=True))
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_state_no_version_venv_no_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_state_venv'], self.app_name)
self.assertEqual(expected,
homebase.user_state_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=True, create=False))
def test_user_state_no_version_venv_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_state_venv'], self.app_name)
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertEqual(expected, homebase.user_state_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=True, create=True))
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_state_version_no_venv_no_create(self):
version = "1.0"
expected = os.path.join(self.base_paths[self.platform]['user_state'], '{}_{}'.format(self.app_name, version))
self.assertEqual(expected,
homebase.user_state_dir(self.app_name, app_author=self.app_author, version=version,
use_virtualenv=False, create=False))
def test_user_state_version_no_venv_create(self):
version = "1.0"
expected = os.path.join(self.base_paths[self.platform]['user_state'], '{}_{}'.format(self.app_name, version))
if os.path.exists(expected):
shutil.rmtree(expected)
result = homebase.user_state_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=False,
create=True)
self.assertEqual(expected, result)
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_state_version_venv_no_create(self):
version = '1.0'
expected = os.path.join(self.base_paths[self.platform]['user_state_venv'],
'{}_{}'.format(self.app_name, version))
result = homebase.user_state_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=True,
create=False)
self.assertEqual(expected, result)
def test_user_state_version_venv_create(self):
version = '1.0'
expected = os.path.join(self.base_paths[self.platform]['user_state_venv'],
'{}_{}'.format(self.app_name, version))
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertFalse(os.path.exists(expected))
result = homebase.user_state_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=True,
create=True)
self.assertEqual(expected, result)
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
#####################################################
# user_logs_dir
#####################################################
def test_user_log_no_version_no_venv_no_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_log'], self.app_name)
self.assertEqual(expected, homebase.user_logs_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=False, create=False))
def test_user_log_no_version_no_venv_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_log'], self.app_name)
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertEqual(expected, homebase.user_logs_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=False, create=True))
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_log_no_version_venv_no_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_log_venv'], self.app_name)
self.assertEqual(expected, homebase.user_logs_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=True, create=False))
def test_user_log_no_version_venv_create(self):
expected = os.path.join(self.base_paths[self.platform]['user_log_venv'], self.app_name)
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertEqual(expected, homebase.user_logs_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=True, create=True))
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_log_version_no_venv_no_create(self):
version = "1.0"
expected = os.path.join(self.base_paths[self.platform]['user_log'], '{}_{}'.format(self.app_name, version))
self.assertEqual(expected,
homebase.user_logs_dir(self.app_name, app_author=self.app_author, version=version,
use_virtualenv=False, create=False))
def test_user_log_version_no_venv_create(self):
version = "1.0"
expected = os.path.join(self.base_paths[self.platform]['user_log'], '{}_{}'.format(self.app_name, version))
if os.path.exists(expected):
shutil.rmtree(expected)
result = homebase.user_logs_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=False,
create=True)
self.assertEqual(expected, result)
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
def test_user_log_version_venv_no_create(self):
version = '1.0'
expected = os.path.join(self.base_paths[self.platform]['user_log_venv'],
'{}_{}'.format(self.app_name, version))
result = homebase.user_logs_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=True,
create=False)
self.assertEqual(expected, result)
def test_user_log_version_venv_create(self):
version = '1.0'
expected = os.path.join(self.base_paths[self.platform]['user_log_venv'],
'{}_{}'.format(self.app_name, version))
if os.path.exists(expected):
shutil.rmtree(expected)
self.assertFalse(os.path.exists(expected))
result = homebase.user_logs_dir(self.app_name, app_author=self.app_author, version=version, use_virtualenv=True,
create=True)
self.assertEqual(expected, result)
self.assertTrue(os.path.exists(expected))
shutil.rmtree(expected)
#####################################################
    # site_data_dir. Not testing create=True since we don't know if we have permissions.
#####################################################
def test_site_data_no_version_no_venv_no_create(self):
expected = os.path.join(self.base_paths[self.platform]['site_data'][0], self.app_name)
self.assertEqual(expected,
homebase.site_data_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=False, create=False))
def test_site_data_no_version_venv_no_create(self):
expected = os.path.join(self.base_paths[self.platform]['site_data'][0], self.app_name)
self.assertEqual(expected,
homebase.site_data_dir(self.app_name, app_author=self.app_author, version=None,
use_virtualenv=True, create=False))
def test_site_data_version_no_venv_no_create(self):
version = '1.0'
expected = os.path.join(self.base_paths[self.platform]['site_data'][0], '{}_{}'.format(self.app_name, version))
self.assertEqual(expected,
homebase.site_data_dir(self.app_name, app_author=self.app_author, version=version,
use_virtualenv=False, create=False))
def test_site_data_version_venv_no_create(self):
version = '1.0'
expected = os.path.join(self.base_paths[self.platform]['site_data'][0], '{}_{}'.format(self.app_name, version))
self.assertEqual(expected,
homebase.site_data_dir(self.app_name, app_author=self.app_author, version=version,
use_virtualenv=True, create=False))
current month,, *overall: 0.
:param stats_fields: Additional fields to add to statistics
"""
method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
param_aliases = []
method_name = 'ads.getStatistics'
response_type = responses.AdsGetStatistics
return self._call(method_name, method_parameters, param_aliases, response_type)
@overload
def get_suggestions(self, section: Literal['countries', 'positions', 'interest_categories', 'religions', 'user_devices', 'user_os', 'user_browsers'], ids: Optional[str] = None, q: Optional[str] = None, country: Optional[int] = None, cities: Optional[str] = None, lang: Optional[str] = None) -> responses.AdsGetSuggestions: ...
@overload
def get_suggestions(self, section: Literal['regions'], ids: Optional[str] = None, q: Optional[str] = None, country: Optional[int] = None, cities: Optional[str] = None, lang: Optional[str] = None) -> responses.AdsGetSuggestionsRegions: ...
@overload
def get_suggestions(self, section: Literal['cities', 'districts', 'streets'], ids: Optional[str] = None, q: Optional[str] = None, country: Optional[int] = None, cities: Optional[str] = None, lang: Optional[str] = None) -> responses.AdsGetSuggestionsCities: ...
@overload
def get_suggestions(self, section: Literal['schools'], ids: Optional[str] = None, q: Optional[str] = None, country: Optional[int] = None, cities: Optional[str] = None, lang: Optional[str] = None) -> responses.AdsGetSuggestionsSchools: ...
def get_suggestions(self, section: str, ids: Optional[str] = None, q: Optional[str] = None, country: Optional[int] = None, cities: Optional[str] = None, lang: Optional[str] = None):
"""
Returns a set of auto-suggestions for various targeting parameters.
:param section: Section, suggestions are retrieved in. Available values: *countries — request of a list of countries. If q is not set or blank, a short list of countries is shown. Otherwise, a full list of countries is shown. *regions — requested list of regions. 'country' parameter is required. *cities — requested list of cities. 'country' parameter is required. *districts — requested list of districts. 'cities' parameter is required. *stations — requested list of subway stations. 'cities' parameter is required. *streets — requested list of streets. 'cities' parameter is required. *schools — requested list of educational organizations. 'cities' parameter is required. *interests — requested list of interests. *positions — requested list of positions (professions). *group_types — requested list of group types. *religions — requested list of religious commitments. *browsers — requested list of browsers and mobile devices.
:param ids: Objects IDs separated by commas. If the parameter is passed, 'q, country, cities' should not be passed.
:param q: Filter-line of the request (for countries, regions, cities, streets, schools, interests, positions).
:param country: ID of the country objects are searched in.
:param cities: IDs of cities where objects are searched in, separated with a comma.
:param lang: Language of the returned string values. Supported languages: *ru — Russian,, *ua — Ukrainian,, *en — English.
"""
method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
param_aliases = []
method_name = 'ads.getSuggestions'
        if section in ['countries', 'positions', 'interest_categories', 'religions', 'user_devices', 'user_os', 'user_browsers']:
            response_type = responses.AdsGetSuggestions
        elif section in ['regions']:
            response_type = responses.AdsGetSuggestionsRegions
        elif section in ['cities', 'districts', 'streets']:
            response_type = responses.AdsGetSuggestionsCities
        elif section in ['schools']:
            response_type = responses.AdsGetSuggestionsSchools
        else:
            # fall back to the generic response type so response_type is always bound
            response_type = responses.AdsGetSuggestions
return self._call(method_name, method_parameters, param_aliases, response_type)
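    # Hypothetical usage sketch (the client object shown here is assumed, not defined in this
    # module): request city suggestions for a given country and query string.
    #
    #   cities = ads.get_suggestions(section='cities', country=1, q='Mos', lang='en')
    #   # 'cities', 'districts' and 'streets' all map to responses.AdsGetSuggestionsCities above.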
def get_target_groups(self, account_id: int, client_id: Optional[int] = None, extended: Optional[bool] = None) -> responses.AdsGetTargetGroups:
"""
Returns a list of target groups.
:param account_id: Advertising account ID.
:param client_id: 'Only for advertising agencies.', ID of the client with the advertising account where the group will be created.
:param extended: '1' — to return pixel code.
"""
method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
param_aliases = []
method_name = 'ads.getTargetGroups'
response_type = responses.AdsGetTargetGroups
return self._call(method_name, method_parameters, param_aliases, response_type)
def get_targeting_stats(self, account_id: int, link_url: str, client_id: Optional[int] = None, criteria: Optional[str] = None, ad_id: Optional[int] = None, ad_format: Optional[int] = None, ad_platform: Optional[str] = None, ad_platform_no_wall: Optional[str] = None, ad_platform_no_ad_network: Optional[str] = None, link_domain: Optional[str] = None, need_precise: Optional[bool] = None) -> responses.AdsGetTargetingStats:
"""
Returns the size of targeting audience, and also recommended values for CPC and CPM.
:param account_id: Advertising account ID.
:param link_url: URL for the advertised object.
:param client_id:
:param criteria: Serialized JSON object that describes targeting parameters. Description of 'criteria' object see below.
:param ad_id: ID of an ad which targeting parameters shall be analyzed.
:param ad_format: Ad format. Possible values: *'1' — image and text,, *'2' — big image,, *'3' — exclusive format,, *'4' — community, square image,, *'7' — special app format,, *'8' — special community format,, *'9' — post in community,, *'10' — app board.
:param ad_platform: Platforms to use for ad showing. Possible values: (for 'ad_format' = '1'), *'0' — VK and partner sites,, *'1' — VK only. (for 'ad_format' = '9'), *'all' — all platforms,, *'desktop' — desktop version,, *'mobile' — mobile version and apps.
:param ad_platform_no_wall:
:param ad_platform_no_ad_network:
:param link_domain: Domain of the advertised object.
:param need_precise: Additionally return recommended cpc and cpm to reach 5,10..95 percents of audience.
"""
method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
param_aliases = []
method_name = 'ads.getTargetingStats'
response_type = responses.AdsGetTargetingStats
return self._call(method_name, method_parameters, param_aliases, response_type)
def get_upload_url(self, ad_format: int, icon: Optional[int] = None) -> responses.AdsGetUploadUrl:
"""
Returns URL to upload an ad photo to.
:param ad_format: Ad format: *1 — image and text,, *2 — big image,, *3 — exclusive format,, *4 — community, square image,, *7 — special app format.
:param icon:
"""
method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
param_aliases = []
method_name = 'ads.getUploadURL'
response_type = responses.AdsGetUploadUrl
return self._call(method_name, method_parameters, param_aliases, response_type)
def get_video_upload_url(self) -> responses.AdsGetVideoUploadUrl:
"""
Returns URL to upload an ad video to.
"""
method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
param_aliases = []
method_name = 'ads.getVideoUploadURL'
response_type = responses.AdsGetVideoUploadUrl
return self._call(method_name, method_parameters, param_aliases, response_type)
def import_target_contacts(self, account_id: int, target_group_id: int, contacts: str, client_id: Optional[int] = None) -> responses.AdsImportTargetContacts:
"""
Imports a list of advertiser's contacts to count VK registered users against the target group.
:param account_id: Advertising account ID.
:param target_group_id: Target group ID.
:param contacts: List of phone numbers, emails or user IDs separated with a comma.
:param client_id: 'Only for advertising agencies.' , ID of the client with the advertising account where the group will be created.
"""
method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
param_aliases = []
method_name = 'ads.importTargetContacts'
response_type = responses.AdsImportTargetContacts
return self._call(method_name, method_parameters, param_aliases, response_type)
def remove_office_users(self, account_id: int, ids: str) -> responses.AdsRemoveOfficeUsers:
"""
Removes managers and/or supervisors from advertising account.
:param account_id: Advertising account ID.
:param ids: Serialized JSON array with IDs of deleted managers.
"""
method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
param_aliases = []
method_name = 'ads.removeOfficeUsers'
response_type = responses.AdsRemoveOfficeUsers
return self._call(method_name, method_parameters, param_aliases, response_type)
def update_ads(self, account_id: int, data: str) -> responses.AdsUpdateAds:
"""
Edits ads.
:param account_id: Advertising account ID.
:param data: Serialized JSON array of objects that describe changes in ads. Description of 'ad_edit_specification' objects see below.
"""
method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
param_aliases = []
method_name = 'ads.updateAds'
response_type = responses.AdsUpdateAds
return self._call(method_name, method_parameters, param_aliases, response_type)
def update_campaigns(self, account_id: int, data: str) -> responses.AdsUpdateCampaigns:
"""
Edits advertising campaigns.
:param account_id: Advertising account ID.
:param data: Serialized JSON array of objects that describe changes in campaigns. Description of 'campaign_mod' objects see below.
"""
method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
param_aliases = []
method_name = 'ads.updateCampaigns'
response_type = responses.AdsUpdateCampaigns
return self._call(method_name, method_parameters, param_aliases, response_type)
def update_clients(self, account_id: int, data: str) -> responses.AdsUpdateClients:
"""
Edits clients of an advertising agency.
:param account_id: Advertising account ID.
:param data: Serialized JSON array of objects that describe changes in clients. Description of 'client_mod' objects see below.
"""
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'ads.updateClients'
        response_type = responses.AdsUpdateClients
        return self._call(method_name, method_parameters, param_aliases, response_type)
"""
Module for eigenvalue analysis.
"""
import logging
from math import ceil, pi
import numpy as np
import scipy.io
from scipy.linalg import solve
from andes.io.txt import dump_data
from andes.plot import set_latex, set_style
from andes.routines.base import BaseRoutine
from andes.shared import div, matrix, plt, sparse, spdiag, spmatrix
from andes.utils.misc import elapsed
from andes.variables.report import report_info
logger = logging.getLogger(__name__)
DPI = None
class EIG(BaseRoutine):
"""
Eigenvalue analysis routine
"""
def __init__(self, system, config):
super().__init__(system=system, config=config)
self.config.add(plot=0, tol=1e-6)
self.config.add_extra("_help",
plot="show plot after computation",
tol="numerical tolerance to treat eigenvalues as zeros")
self.config.add_extra("_alt", plot=(0, 1))
# internal flags and storage
self.As = None # state matrix after removing the ones associated with zero T consts
self.Asc = None # the original complete As without reordering
self.mu = None # eigenvalues
self.N = None # right eigenvectors
self.W = None # left eigenvectors
self.pfactors = None
# --- related to states with zero time constants (zs) ---
self.zstate_idx = np.array([], dtype=int)
self.nz_counts = None
# --- statistics --
self.n_positive = 0
self.n_zeros = 0
self.n_negative = 0
self.x_name = []
def calc_As(self, dense=True):
r"""
Return state matrix and store to ``self.As``.
Notes
-----
For systems in the mass-matrix formulation,
        .. math::
            T \dot{x} = f(x, y) \\
            0 = g(x, y)
        Assuming `T` is non-singular, the state matrix is calculated from
        .. math::
            A_s = T^{-1} (f_x - f_y g_y^{-1} g_x)
Returns
-------
kvxopt.matrix
state matrix
"""
dae = self.system.dae
self.find_zero_states()
self.x_name = np.array(dae.x_name)
self.As = self._reduce(dae.fx, dae.fy,
dae.gx, dae.gy, dae.Tf,
dense=dense)
if len(self.zstate_idx) > 0:
self.Asc = self.As
self.As = self._reduce(*self._reorder())
return self.As
def _reduce(self, fx, fy, gx, gy, Tf, dense=True):
"""
Reduce algebraic equations (or states associated with zero time constants).
Returns
-------
spmatrix
The reduced state matrix
"""
gyx = matrix(gx)
self.solver.linsolve(gy, gyx)
Tfnz = Tf + np.ones_like(Tf) * np.equal(Tf, 0.0)
iTf = spdiag((1 / Tfnz).tolist())
if dense:
return iTf * (fx - fy * gyx)
else:
return sparse(iTf * (fx - fy * gyx))
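    # A small numerical sketch of the reduction above using plain numpy (the real routine works
    # on kvxopt matrices; the numbers here are made up purely for illustration):
    #
    #   import numpy as np
    #   fx = np.array([[-1.0, 0.5], [0.0, -2.0]])
    #   fy = np.array([[1.0], [0.0]])
    #   gx = np.array([[0.2, 0.0]])
    #   gy = np.array([[-4.0]])
    #   Tf = np.array([2.0, 1.0])
    #   gyx = np.linalg.solve(gy, gx)            # gy^{-1} gx
    #   As = np.diag(1.0 / Tf) @ (fx - fy @ gyx) # T^{-1} (fx - fy gy^{-1} gx)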
def _reorder(self):
"""
        Reorder ``As`` by moving rows and columns associated with zero time constants to the end.
Returns `fx`, `fy`, `gx`, `gy`, `Tf`.
"""
dae = self.system.dae
rows = np.arange(dae.n, dtype=int)
cols = np.arange(dae.n, dtype=int)
vals = np.ones(dae.n, dtype=float)
swaps = []
bidx = self.nz_counts
for ii in range(dae.n - self.nz_counts):
if ii in self.zstate_idx:
while (bidx in self.zstate_idx):
bidx += 1
cols[ii] = bidx
rows[bidx] = ii
swaps.append((ii, bidx))
# swap the variable names
for fr, bk in swaps:
bk_name = self.x_name[bk]
self.x_name[fr] = bk_name
self.x_name = self.x_name[:self.nz_counts]
# compute the permutation matrix for `As` containing non-states
perm = spmatrix(matrix(vals), matrix(rows), matrix(cols))
As_perm = perm * sparse(self.As) * perm
self.As_perm = As_perm
nfx = As_perm[:self.nz_counts, :self.nz_counts]
nfy = As_perm[:self.nz_counts, self.nz_counts:]
ngx = As_perm[self.nz_counts:, :self.nz_counts]
ngy = As_perm[self.nz_counts:, self.nz_counts:]
nTf = np.delete(self.system.dae.Tf, self.zstate_idx)
return nfx, nfy, ngx, ngy, nTf
def calc_eig(self, As=None):
"""
Calculate eigenvalues and right eigen vectors.
This function is a wrapper to ``np.linalg.eig``.
Results are returned but not stored to ``EIG``.
Returns
-------
np.array(dtype=complex)
eigenvalues
np.array()
right eigenvectors
"""
if As is None:
As = self.As
        # `mu`: eigenvalues, `N`: right eigenvectors, with each column corresponding to one eigenvalue
mu, N = np.linalg.eig(As)
return mu, N
def _store_stats(self):
"""
Count and store the number of eigenvalues with positive, zero,
and negative real parts using ``self.mu``.
"""
mu_real = self.mu.real
self.n_positive = np.count_nonzero(mu_real > self.config.tol)
self.n_zeros = np.count_nonzero(abs(mu_real) <= self.config.tol)
        self.n_negative = np.count_nonzero(mu_real < -self.config.tol)
return True
def calc_pfactor(self, As=None):
"""
Compute participation factor of states in eigenvalues.
        Each row in the participation factor corresponds to one state,
        and each column corresponds to one mode.
Parameters
----------
As : np.array or None
State matrix to process. If None, use ``self.As``.
Returns
-------
np.array(dtype=complex)
eigenvalues
np.array
participation factor matrix
"""
mu, N = self.calc_eig(As)
n_state = len(mu)
        # --- calculate the left eigenvectors and store to ``W`` ---
# based on orthogonality that `W.T @ N = I`,
# left eigenvector is `inv(N)^T`
Weye = np.eye(n_state)
WT = solve(N, Weye, overwrite_b=True)
W = WT.T
# --- calculate participation factor ---
pfactor = np.abs(W) * np.abs(N)
b = np.ones(n_state)
W_abs = b @ pfactor
pfactor = pfactor.T
# --- normalize participation factor ---
for item in range(n_state):
pfactor[:, item] /= W_abs[item]
pfactor = np.round(pfactor, 5)
return mu, pfactor, N, W
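    # A compact sketch of the quantity computed above with made-up numbers (numpy only; the
    # routine additionally transposes, normalizes and rounds the matrix before returning it):
    #
    #   A = np.array([[-1.0, 0.2], [0.0, -3.0]])
    #   mu, N = np.linalg.eig(A)                 # columns of N are right eigenvectors
    #   W = np.linalg.inv(N).T                   # left eigenvectors, so that W.T @ N = I
    #   p = np.abs(W) * np.abs(N)                # p[k, j]: participation of state k in mode j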
def summary(self):
"""
Print out a summary to ``logger.info``.
"""
out = list()
out.append('')
out.append('-> Eigenvalue Analysis:')
out_str = '\n'.join(out)
logger.info(out_str)
def find_zero_states(self):
"""
Find the indices of states associated with zero time constants in ``x``.
"""
system = self.system
self.zstate_idx = np.array([], dtype=int)
if sum(system.dae.Tf != 0) != len(system.dae.Tf):
self.zstate_idx = np.where(system.dae.Tf == 0)[0]
logger.info("%d states are associated with zero time constants. ", len(self.zstate_idx))
logger.debug([system.dae.x_name[i] for i in self.zstate_idx])
self.nz_counts = system.dae.n - len(self.zstate_idx)
def _pre_check(self):
"""
Helper function for pre-computation checks.
"""
system = self.system
status = True
if system.PFlow.converged is False:
logger.warning('Power flow not solved. Eig analysis will not continue.')
status = False
if system.TDS.initialized is False:
system.TDS.init()
system.TDS.itm_step()
elif system.dae.n == 0:
logger.error('No dynamic model. Eig analysis will not continue.')
status = False
return status
def run(self, **kwargs):
"""
Run small-signal stability analysis.
"""
succeed = False
system = self.system
if not self._pre_check():
system.exit_code += 1
return False
self.summary()
t1, s = elapsed()
self.calc_As()
self.mu, self.pfactors, self.N, self.W = self.calc_pfactor()
self._store_stats()
t2, s = elapsed(t1)
self.exec_time = t2 - t1
logger.info(self.stats())
logger.info('Eigenvalue analysis finished in {:s}.'.format(s))
if not self.system.files.no_output:
self.report()
if system.options.get('state_matrix'):
self.export_mat()
if self.config.plot:
self.plot()
succeed = True
if not succeed:
system.exit_code += 1
return succeed
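    # A hedged usage sketch (entry points and the case file name are illustrative and may differ
    # across ANDES versions):
    #
    #   import andes
    #   ss = andes.run('kundur_full.xlsx')       # load a case and solve the power flow
    #   ss.EIG.run()                             # fills ss.EIG.mu, ss.EIG.pfactors, etc.
    #   fig, ax = ss.EIG.plot(show=False)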
def stats(self):
"""
Return statistics of results in a string.
"""
out = list()
out.append(' Positive %6g' % self.n_positive)
out.append(' Zeros %6g' % self.n_zeros)
out.append(' Negative %6g' % self.n_negative)
return '\n'.join(out)
def plot(self, mu=None, fig=None, ax=None,
left=-6, right=0.5, ymin=-8, ymax=8, damping=0.05,
line_width=0.5, dpi=DPI, figsize=None, base_color='black',
show=True, latex=True, style='default'):
"""
Plot utility for eigenvalues in the S domain.
Parameters
----------
mu : array, optional
an array of complex eigenvalues
        fig : figure handle, optional
existing matplotlib figure handle
ax : axis handle, optional
existing axis handle
left : int, optional
left tick for the x-axis, by default -6
right : float, optional
right tick, by default 0.5
ymin : int, optional
bottom tick, by default -8
ymax : int, optional
top tick, by default 8
damping : float, optional
damping value for which the dash plots are drawn
line_width : float, optional
default line width, by default 0.5
dpi : int, optional
figure dpi
        figsize : tuple, optional
default figure size, by default None
base_color : str, optional
base color for negative eigenvalues
show : bool, optional
True to show figure after plot, by default True
latex : bool, optional
True to use latex, by default True
Returns
-------
figure
matplotlib figure object
axis
matplotlib axis object
"""
set_style(style)
if mu is None:
mu = self.mu
mu_real = mu.real
mu_imag = mu.imag
p_mu_real, p_mu_imag = list(), list()
z_mu_real, z_mu_imag = list(), list()
n_mu_real, n_mu_imag = list(), list()
for re, im in zip(mu_real, mu_imag):
if abs(re) <= self.config.tol:
z_mu_real.append(re)
z_mu_imag.append(im)
elif re > self.config.tol:
p_mu_real.append(re)
p_mu_imag.append(im)
elif re < -self.config.tol:
n_mu_real.append(re)
n_mu_imag.append(im)
if latex:
set_latex()
if fig is None or ax is None:
fig = plt.figure(dpi=dpi, figsize=figsize)
ax = plt.gca()
ax.scatter(z_mu_real, z_mu_imag, marker='o', s=40, linewidth=0.5, facecolors='none', edgecolors='green')
ax.scatter(n_mu_real, n_mu_imag, marker='x', s=40, linewidth=0.5, color=base_color)
ax.scatter(p_mu_real, p_mu_imag, marker='x', s=40, linewidth=0.5, color='red')
# axes lines
ax.axhline(linewidth=0.5, color='grey', linestyle='--')
ax.axvline(linewidth=0.5, color='grey', linestyle='--')
# TODO: Improve the damping and range
# --- plot 5% damping lines ---
xin = np.arange(left, 0, 0.01)
yneg = xin / damping
ypos = - xin / damping
ax.plot(xin, yneg, color='grey', linewidth=line_width, linestyle='--')
ax.plot(xin, ypos, color='grey', linewidth=line_width, linestyle='--')
# --- damping lines end ---
if latex:
ax.set_xlabel('Real [$s^{-1}$]')
ax.set_ylabel('Imaginary [$s^{-1}$]')
else:
ax.set_xlabel('Real [s -1]')
ax.set_ylabel('Imaginary [s -1]')
ax.set_xlim(left=left, right=right)
ax.set_ylim(ymin, ymax)
if show is True:
plt.show()
return fig, ax
def export_mat(self):
"""
        Export state matrix to a ``<CaseName>_As.mat`` file with the
listener: ParseTreeListener):
if hasattr(listener, "enterInclude_"):
listener.enterInclude_(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitInclude_"):
listener.exitInclude_(self)
def accept(self, visitor: ParseTreeVisitor):
if hasattr(visitor, "visitInclude_"):
return visitor.visitInclude_(self)
else:
return visitor.visitChildren(self)
def include_(self):
localctx = asm8086Parser.Include_Context(self, self._ctx, self.state)
self.enterRule(localctx, 44, self.RULE_include_)
try:
self.enterOuterAlt(localctx, 1)
self.state = 203
self.match(asm8086Parser.INCLUDE)
self.state = 204
self.name()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionlistContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def expression(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(asm8086Parser.ExpressionContext)
else:
return self.getTypedRuleContext(asm8086Parser.ExpressionContext, i)
def getRuleIndex(self):
return asm8086Parser.RULE_expressionlist
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterExpressionlist"):
listener.enterExpressionlist(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitExpressionlist"):
listener.exitExpressionlist(self)
def accept(self, visitor: ParseTreeVisitor):
if hasattr(visitor, "visitExpressionlist"):
return visitor.visitExpressionlist(self)
else:
return visitor.visitChildren(self)
def expressionlist(self):
localctx = asm8086Parser.ExpressionlistContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_expressionlist)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 206
self.expression()
self.state = 211
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la == asm8086Parser.T__7:
self.state = 207
self.match(asm8086Parser.T__7)
self.state = 208
self.expression()
self.state = 213
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LabelContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def name(self):
return self.getTypedRuleContext(asm8086Parser.NameContext, 0)
def getRuleIndex(self):
return asm8086Parser.RULE_label
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterLabel"):
listener.enterLabel(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitLabel"):
listener.exitLabel(self)
def accept(self, visitor: ParseTreeVisitor):
if hasattr(visitor, "visitLabel"):
return visitor.visitLabel(self)
else:
return visitor.visitChildren(self)
def label(self):
localctx = asm8086Parser.LabelContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_label)
try:
self.enterOuterAlt(localctx, 1)
self.state = 214
self.name()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def multiplyingExpression(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(asm8086Parser.MultiplyingExpressionContext)
else:
return self.getTypedRuleContext(asm8086Parser.MultiplyingExpressionContext, i)
def SIGN(self, i: int = None):
if i is None:
return self.getTokens(asm8086Parser.SIGN)
else:
return self.getToken(asm8086Parser.SIGN, i)
def getRuleIndex(self):
return asm8086Parser.RULE_expression
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterExpression"):
listener.enterExpression(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitExpression"):
listener.exitExpression(self)
def accept(self, visitor: ParseTreeVisitor):
if hasattr(visitor, "visitExpression"):
return visitor.visitExpression(self)
else:
return visitor.visitChildren(self)
def expression(self):
localctx = asm8086Parser.ExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 50, self.RULE_expression)
try:
self.enterOuterAlt(localctx, 1)
self.state = 216
self.multiplyingExpression()
self.state = 221
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 18, self._ctx)
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 217
self.match(asm8086Parser.SIGN)
self.state = 218
self.multiplyingExpression()
self.state = 223
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 18, self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class MultiplyingExpressionContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def argument(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(asm8086Parser.ArgumentContext)
else:
return self.getTypedRuleContext(asm8086Parser.ArgumentContext, i)
def getRuleIndex(self):
return asm8086Parser.RULE_multiplyingExpression
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterMultiplyingExpression"):
listener.enterMultiplyingExpression(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitMultiplyingExpression"):
listener.exitMultiplyingExpression(self)
def accept(self, visitor: ParseTreeVisitor):
if hasattr(visitor, "visitMultiplyingExpression"):
return visitor.visitMultiplyingExpression(self)
else:
return visitor.visitChildren(self)
def multiplyingExpression(self):
localctx = asm8086Parser.MultiplyingExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 52, self.RULE_multiplyingExpression)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 224
self.argument()
self.state = 229
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 19, self._ctx)
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
self.state = 225
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & (
(1 << asm8086Parser.T__8) | (1 << asm8086Parser.T__9) | (1 << asm8086Parser.T__10) | (
1 << asm8086Parser.T__11))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 226
self.argument()
self.state = 231
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 19, self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ArgumentContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def number(self):
return self.getTypedRuleContext(asm8086Parser.NumberContext, 0)
def dollar(self):
return self.getTypedRuleContext(asm8086Parser.DollarContext, 0)
def register_(self):
return self.getTypedRuleContext(asm8086Parser.Register_Context, 0)
def name(self):
return self.getTypedRuleContext(asm8086Parser.NameContext, 0)
def string_(self):
return self.getTypedRuleContext(asm8086Parser.String_Context, 0)
def expression(self):
return self.getTypedRuleContext(asm8086Parser.ExpressionContext, 0)
def ptr(self):
return self.getTypedRuleContext(asm8086Parser.PtrContext, 0)
def NOT(self):
return self.getToken(asm8086Parser.NOT, 0)
def OFFSET(self):
return self.getToken(asm8086Parser.OFFSET, 0)
def LENGTH(self):
return self.getToken(asm8086Parser.LENGTH, 0)
def getRuleIndex(self):
return asm8086Parser.RULE_argument
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterArgument"):
listener.enterArgument(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitArgument"):
listener.exitArgument(self)
def accept(self, visitor: ParseTreeVisitor):
if hasattr(visitor, "visitArgument"):
return visitor.visitArgument(self)
else:
return visitor.visitChildren(self)
def argument(self):
localctx = asm8086Parser.ArgumentContext(self, self._ctx, self.state)
self.enterRule(localctx, 54, self.RULE_argument)
try:
self.state = 263
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input, 21, self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 232
self.number()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 233
self.dollar()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 234
self.register_()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 235
self.name()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 236
self.string_()
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 237
self.match(asm8086Parser.T__3)
self.state = 238
self.expression()
self.state = 239
self.match(asm8086Parser.T__4)
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 243
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [asm8086Parser.SIGN, asm8086Parser.NUMBER]:
self.state = 241
self.number()
pass
elif token in [asm8086Parser.NAME]:
self.state = 242
self.name()
pass
elif token in [asm8086Parser.T__12]:
pass
else:
pass
self.state = 245
self.match(asm8086Parser.T__12)
self.state = 246
self.expression()
self.state = 247
self.match(asm8086Parser.T__13)
pass
elif la_ == 8:
self.enterOuterAlt(localctx, 8)
self.state = 249
self.ptr()
self.state = 250
self.expression()
pass
elif la_ == 9:
self.enterOuterAlt(localctx, 9)
self.state = 252
self.match(asm8086Parser.NOT)
self.state = 253
self.expression()
pass
elif la_ == 10:
self.enterOuterAlt(localctx, 10)
self.state = 254
self.match(asm8086Parser.OFFSET)
self.state = 255
self.expression()
pass
elif la_ == 11:
self.enterOuterAlt(localctx, 11)
self.state = 256
self.match(asm8086Parser.LENGTH)
self.state = 257
self.expression()
pass
elif la_ == 12:
self.enterOuterAlt(localctx, 12)
self.state = 258
self.register_()
self.state = 259
self.match(asm8086Parser.T__1)
self.state = 261
self.expression()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class PtrContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def PTR(self):
return self.getToken(asm8086Parser.PTR, 0)
def BYTE(self):
return self.getToken(asm8086Parser.BYTE, 0)
def WORD(self):
return self.getToken(asm8086Parser.WORD, 0)
def DWORD(self):
return self.getToken(asm8086Parser.DWORD, 0)
def getRuleIndex(self):
return asm8086Parser.RULE_ptr
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterPtr"):
listener.enterPtr(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitPtr"):
listener.exitPtr(self)
def accept(self, visitor: ParseTreeVisitor):
if hasattr(visitor, "visitPtr"):
return visitor.visitPtr(self)
else:
return visitor.visitChildren(self)
def ptr(self):
localctx = asm8086Parser.PtrContext(self, self._ctx, self.state)
self.enterRule(localctx, 56, self.RULE_ptr)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 266
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & (
(1 << asm8086Parser.BYTE) | (1 << asm8086Parser.WORD) | (1 << asm8086Parser.DWORD))) != 0):
self.state = 265
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & (
(1 << asm8086Parser.BYTE) | (1 << asm8086Parser.WORD) | (1 << asm8086Parser.DWORD))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 268
self.match(asm8086Parser.PTR)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DollarContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def DOLLAR(self):
return self.getToken(asm8086Parser.DOLLAR, 0)
def getRuleIndex(self):
return asm8086Parser.RULE_dollar
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterDollar"):
listener.enterDollar(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitDollar"):
listener.exitDollar(self)
def accept(self, visitor: ParseTreeVisitor):
if hasattr(visitor, "visitDollar"):
return visitor.visitDollar(self)
else:
return visitor.visitChildren(self)
def dollar(self):
localctx = asm8086Parser.DollarContext(self, self._ctx, self.state)
self.enterRule(localctx, 58, self.RULE_dollar)
try:
self.enterOuterAlt(localctx, 1)
self.state = 270
self.match(asm8086Parser.DOLLAR)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Register_Context(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def REGISTER(self):
return self.getToken(asm8086Parser.REGISTER, 0)
def getRuleIndex(self):
return asm8086Parser.RULE_register_
def enterRule(self, listener: ParseTreeListener):
if hasattr(listener, "enterRegister_"):
listener.enterRegister_(self)
def exitRule(self, listener: ParseTreeListener):
if hasattr(listener, "exitRegister_"):
listener.exitRegister_(self)
lootbox and obtained:\n"
"".format(ctx.message.author.name, lootbox.title()))
for pkmn in pokemon_obtained.items():
self._move_pokemon_to_inventory(trainer_profile,
pkmn[0],
pkmn[1])
if pkmn[1]:
msg += "**{}(Shiny)**\n".format(pkmn[0].title())
self._post_pokemon_catch(ctx, random_pkmn, pkmn_img_path, random_pkmnball, is_shiny, catch_condition, lootbox)
else:
msg += "**{}**\n".format(pkmn[0].title())
em = discord.Embed(title="Lootbox",
description=msg,
colour=lootbox_color)
em.set_thumbnail(url=thumbnail_url)
await self.bot.say(embed=em)
return True
async def open_lootbox(self, ctx, lootbox):
"""
Opens a lootbox from the trainer's inventory based on
the trainer's specified choice
@param lootbox - lootbox to open
"""
try:
if lootbox == 'b':
lootbox = BRONZE
elif lootbox == 's':
lootbox = SILVER
elif lootbox == 'g':
lootbox = GOLD
elif lootbox == 'l':
lootbox = LEGEND
user_id = ctx.message.author.id
valid_user = await self._valid_user(user_id)
if not valid_user:
return
trainer_profile = self.trainer_data[user_id]
if "lootbox" in trainer_profile:
if lootbox in trainer_profile["lootbox"]:
if trainer_profile["lootbox"][lootbox] > 0:
success = await self._generate_lootbox_pokemon(ctx,
lootbox)
if success:
trainer_profile["lootbox"][lootbox] -= 1
self._save_trainer_file(self.trainer_data)
else:
await self.bot.say("<@{}> you don't have any {} "
"lootboxes.".format(user_id,
lootbox))
else:
await self.bot.say("Lootbox does not exist or has not "
"been obtained yet.")
else:
await self.bot.say("Trainer does not have a lootbox.")
except Exception as e:
print("Failed to open a lootbox. See error.log.")
logger.error("Exception: {}".format(str(e)))
def _vendor_roll(self, ctx):
"""
Rolls the trade that the vendor wants to make
"""
i = 0
egg = "egg"
egg_manaphy = "egg-manaphy"
user_id = ctx.message.author.id
night_vendor_event = self.event.event_data["night_vendor_event"]
if user_id not in self.vendor_sales:
shiny_rate_multiplier = night_vendor_event["shiny_rate_multiplier"]
random_pkmn, pkmn_img_path, is_shiny = self._generate_random_pokemon(shiny_rate_multiplier)
while (egg in random_pkmn or egg_manaphy in random_pkmn
or "-" in random_pkmn):
random_pkmn, pkmn_img_path, is_shiny = self._generate_random_pokemon(shiny_rate_multiplier)
self.vendor_sales[user_id] = {}
self.vendor_sales[user_id]["pkmn"] = random_pkmn
self.vendor_sales[user_id]["pkmn_img_path"] = pkmn_img_path
self.vendor_sales[user_id]["shiny"] = is_shiny
if not self.vendor_trade_list[user_id]:
num_pkmn_to_trade = night_vendor_event["num_pkmn_to_trade"]
while i < num_pkmn_to_trade:
t_pkmn = self._generate_random_pokemon(0)[0]
while (egg in t_pkmn or egg_manaphy in t_pkmn
or "-" in t_pkmn):
t_pkmn = self._generate_random_pokemon(0)[0]
self.vendor_trade_list[user_id].append(t_pkmn)
i += 1
async def _vendor_info(self, ctx):
"""
Displays info on what the vendor wants to trade
"""
t_pkmn_list = ''
user_id = ctx.message.author.id
for t_pkmn in self.vendor_trade_list[user_id]:
t_pkmn_list += '{}\n'.format(t_pkmn.title())
pkmn = self.vendor_sales[user_id]["pkmn"]
if self.vendor_sales[user_id]["shiny"]:
pkmn += "(Shiny)"
await self.bot.say('The **Night Vendor** wants to trade '
'**{}** a **{}** for **all** of the following '
'pokemon:\n**{}**'
''.format(ctx.message.author.name,
pkmn.title(),
t_pkmn_list))
async def _vendor_reroll(self, ctx):
"""
Rerolls the vendor's trade for the user of interest
"""
user_id = ctx.message.author.id
trainer_profile = self.trainer_data[user_id]
if trainer_profile["reroll_count"] > 0:
if user_id in self.vendor_sales:
self.vendor_sales.pop(user_id)
if user_id in self.vendor_trade_list:
self.vendor_trade_list.pop(user_id)
t_pkmn_list = ''
self._vendor_roll(ctx)
trainer_profile["reroll_count"] -= 1
self._save_trainer_file(self.trainer_data)
pkmn = self.vendor_sales[user_id]["pkmn"]
for t_pkmn in self.vendor_trade_list[user_id]:
t_pkmn_list += '**{}**\n'.format(t_pkmn.title())
if self.vendor_sales[user_id]["shiny"]:
pkmn += "(Shiny)"
await self.bot.say("**{}** has re-rolled the vendor's trade (**{}**"
" re-rolls remaining). The **Night Vendor** "
"wants to trade **{}** for **all** of the "
"following pokemon:\n{}"
"".format(ctx.message.author.name,
trainer_profile["reroll_count"],
pkmn.title(),
t_pkmn_list))
else:
await self.bot.say("<@{}>, you don't have anymore rolls."
"".format(user_id))
async def _vendor_trade(self, ctx):
"""
Trades the vendor
"""
msg = ''
user_id = ctx.message.author.id
trainer_profile = self.trainer_data[user_id]
trade_verified = True
for p in self.vendor_trade_list[user_id]:
if p not in trainer_profile["pinventory"]:
msg += "**{}**\n".format(p.title())
trade_verified = False
if trade_verified:
for pkmn in self.vendor_trade_list[user_id]:
successful = await self.release_pokemon(ctx,
pkmn,
1,
False,
False)
if not successful:
self.trainer_data = self._load_trainer_file()
return
vendor_pkmn_sale = self.vendor_sales[user_id]["pkmn"]
pkmn_img_path = self.vendor_sales[user_id]["pkmn_img_path"]
is_shiny = self.vendor_sales[user_id]["shiny"]
self._move_pokemon_to_inventory(trainer_profile,
vendor_pkmn_sale,
is_shiny)
self.vendor_sales.pop(user_id)
self.vendor_trade_list.pop(user_id)
random_pokeball = random.choice(list(self.pokeball))
await self._post_pokemon_catch(ctx,
vendor_pkmn_sale,
pkmn_img_path,
random_pokeball,
is_shiny,
"has traded the night vendor for",
None)
trainer_profile["reroll_count"] = 0
self._save_trainer_file(self.trainer_data)
else:
await self.bot.say("Unable to trade. The following Pokémon are "
"missing:\n{}".format(msg))
async def vendor_options(self, ctx, option):
"""
Carries out vendor operations depending on the option
@param ctx - context of the command
@param option - option the user input
"""
try:
if self.event.night_vendor:
user_id = ctx.message.author.id
valid_user = await self._valid_user(user_id)
if not valid_user:
return
trainer_profile = self.trainer_data[user_id]
if (trainer_profile["reroll_count"] > 0
or user_id in self.vendor_sales):
if user_id not in self.vendor_sales:
self._vendor_roll(ctx)
if option == "i":
await self._vendor_info(ctx)
elif option == "r":
await self._vendor_reroll(ctx)
elif option == "t":
await self._vendor_trade(ctx)
else:
await self.bot.say("`{}` is not a valid choice"
"".format(option))
else:
await self.bot.say("<@{}>, the night vendor is done "
"doing business with you for the "
"evening.".format(user_id))
else:
await self.bot.say("The night vendor is not here.")
except Exception as e:
print("Failed to speak with vendor. See error.log")
logger.error("Exception: {}".format(str(e)))
async def claim_daily(self, ctx):
"""
Claims the daily lootbox
"""
try:
user_id = ctx.message.author.id
username = ctx.message.author.name
if user_id not in self.trainer_data:
user_obj = await self.bot.get_user_info(user_id)
self.trainer_data[user_id] = {}
self.trainer_data[user_id]["pinventory"] = {}
self.trainer_data[user_id]["timer"] = False
self.trainer_cache[user_id] = user_obj
trainer_profile = self.trainer_data[user_id]
if "lootbox" not in trainer_profile:
trainer_profile["lootbox"] = {}
if BRONZE not in trainer_profile["lootbox"]:
trainer_profile["lootbox"][BRONZE] = 0
if SILVER not in trainer_profile["lootbox"]:
trainer_profile["lootbox"][SILVER] = 0
if GOLD not in trainer_profile["lootbox"]:
trainer_profile["lootbox"][GOLD] = 0
if LEGEND not in trainer_profile["lootbox"]:
trainer_profile["lootbox"][LEGEND] = 0
if "daily_tokens" not in trainer_profile:
trainer_profile["daily_tokens"] = 0
self._save_trainer_file(self.trainer_data)
if user_id not in self.daily_data:
lootbox = self._generate_lootbox(trainer_profile, daily=True)
trainer_profile["daily_tokens"] += 1
self.daily_data.append(user_id)
self._save_trainer_file(self.trainer_data)
self._save_daily_file(self.daily_data)
await self.bot.say("**{}** claimed their daily to get a **{}**"
" lootbox as well as a daily token."
"".format(username,
lootbox.title()))
else:
await self.bot.say("**{}** has already claimed their daily "
"lootbox".format(username))
except Exception as e:
self.daily_data = self._load_daily_file()
print("Failed to claim daily. See error.log")
logger.error("Exception: {}".format(str(e)))
async def claim_gift(self, ctx):
"""
Claims the available gift
"""
try:
user_id = ctx.message.author.id
username = ctx.message.author.name
pkmn_msg = ''
lootbox_msg = ''
if user_id not in self.trainer_data:
user_obj = await self.bot.get_user_info(user_id)
self.trainer_data[user_id] = {}
self.trainer_data[user_id]["pinventory"] = {}
self.trainer_data[user_id]["timer"] = False
self.trainer_cache[user_id] = user_obj
trainer_profile = self.trainer_data[user_id]
if "lootbox" not in trainer_profile:
trainer_profile["lootbox"] = {}
if BRONZE not in trainer_profile["lootbox"]:
trainer_profile["lootbox"][BRONZE] = 0
if SILVER not in trainer_profile["lootbox"]:
trainer_profile["lootbox"][SILVER] = 0
if GOLD not in trainer_profile["lootbox"]:
trainer_profile["lootbox"][GOLD] = 0
if LEGEND not in trainer_profile["lootbox"]:
trainer_profile["lootbox"][LEGEND] = 0
pinventory = trainer_profile["pinventory"]
if not self.config_data["gift"]:
await self.bot.say("No gift to claim.")
return
if user_id not in self.gift_data:
pokemon_list = self.config_data["gift_list"]["pokemon"]
lootbox_list = self.config_data["gift_list"]["lootbox"]
for pkmn in pokemon_list:
if pkmn not in pinventory:
pinventory[pkmn] = pokemon_list[pkmn]
else:
pinventory[pkmn] += pokemon_list[pkmn]
pkmn_msg += "**{}** x{}\n".format(pkmn.title(),
pokemon_list[pkmn])
for lootbox in lootbox_list:
trainer_profile["lootbox"][lootbox] += lootbox_list[lootbox]
lootbox_msg += "**{}** x{}\n".format(lootbox.title(),
lootbox_list[lootbox])
self.gift_data.append(user_id)
self._save_trainer_file(self.trainer_data)
self._save_gift_file(self.gift_data)
em = discord.Embed(title="{}'s Gift\n"
"".format(username),
colour=0xFFFFFF)
if pokemon_list:
em.add_field(name="Pokemon",
value=pkmn_msg)
if lootbox_list:
em.add_field(name="Lootbox",
value=lootbox_msg)
await self.bot.say(embed=em)
else:
await self.bot.say("<@{}>, you've already claimed your "
"gift".format(user_id))
except Exception as e:
self.gift_data = self._load_gift_file()
print("Failed to claim gift. See error.log")
logger.error("Exception: {}".format(str(e)))
async def display_daily_tokens(self, ctx):
"""
Displays the player's daily token
"""
try:
user_id = ctx.message.author.id
if user_id not in self.trainer_data:
await self.bot.say("Please catch a pokemon with `p.c` first.")
trainer_profile = self.trainer_data[user_id]
if "daily_tokens" not in trainer_profile:
trainer_profile["daily_tokens"] = 0
self._save_trainer_file(self.trainer_data)
await self.bot.say("<@{}> currently has **{}** daily tokens."
"".format(user_id,
trainer_profile["daily_tokens"]))
except Exception as e:
print("Failed to display daily tokens.")
logger.error("Exception: {}".format(str(e)))
async def daily_shop(self, ctx, option, item_num):
"""
Displays the daily shop via options
"""
try:
user_id = ctx.message.author.id
            if user_id not in self.trainer_data:
                await self.bot.say("Please catch a pokemon with `p.c` first.")
                return
            trainer_profile = self.trainer_data[user_id]
if option == "i":
menu_items = ("[1] - Bronze lootbox (**{}** tokens)\n"
"[2] - Silver lootbox (**{}** tokens)\n"
"[3] - Gold lootbox (**{}** tokens)\n"
"[4] - Legendary lootbox (**{}** tokens)\n"
"[5] - Random shiny pokemon (**{}** tokens)\n"
"".format(BRONZE_LOOTBOX_PRICE,
SILVER_LOOTBOX_PRICE,
GOLD_LOOTBOX_PRICE,
LEGENDARY_LOOTBOX_PRICE,
RANDOM_SHINY_PRICE))
em = discord.Embed(title="Daily Token Shop",
description=menu_items)
await self.bot.say(embed=em)
elif option == "b":
if item_num is None:
await self.bot.say("Please enter the item number you wish to buy.")
return
token_num = int(trainer_profile["daily_tokens"])
if item_num == '1':
if token_num < BRONZE_LOOTBOX_PRICE:
await self.bot.say("<@{}>, you do not have enough tokens."
"".format(user_id))
return
trainer_profile["daily_tokens"] = token_num - BRONZE_LOOTBOX_PRICE
trainer_profile["lootbox"][BRONZE] += 1
await self.bot.say("<@{}> bought a **Bronze** lootbox.".format(user_id))
elif item_num == '2':
if token_num < SILVER_LOOTBOX_PRICE:
await self.bot.say("<@{}>, you do not have enough tokens."
"".format(user_id))
return
trainer_profile["daily_tokens"] = token_num - SILVER_LOOTBOX_PRICE
trainer_profile["lootbox"][SILVER] += 1
await self.bot.say("<@{}> bought a **Silver** lootbox.".format(user_id))
elif item_num == '3':
if token_num < GOLD_LOOTBOX_PRICE:
await self.bot.say("<@{}>, you do not have enough tokens."
"".format(user_id))
return
trainer_profile["daily_tokens"] = token_num - GOLD_LOOTBOX_PRICE
trainer_profile["lootbox"][SILVER] += 1
await self.bot.say("<@{}> bought a **Gold** lootbox.".format(user_id))
elif item_num == '4':
if token_num < LEGENDARY_LOOTBOX_PRICE:
await self.bot.say("<@{}>, you do not have enough tokens."
"".format(user_id))
return
trainer_profile["daily_tokens"] = token_num - LEGENDARY_LOOTBOX_PRICE
from argparse import ArgumentParser
import numpy as np
from .calculations import main_print_point_bidim_dpa
from .graphics import main_graph
from .save_output import save_output_jpg, save_output_txt
def init_parser():
'''
Initialization of the argument parser. Version as optional argument.
Returns
-------------------
parser : argparse.ArgumentParser
An instance of a command line arguments parser.
'''
parser = ArgumentParser(prog='B_field',
usage='%(prog)s [options] path',
                            description='''Evaluation of both the effective magnetic induction B at a
                            given point (xp, yp) and the DPA (distanza di prima approssimazione),
                            due to a single or double triad of cables.''',
fromfile_prefix_chars='@',
epilog='SINGLE/DOUBLE TRIAD NEEDED. SINGLE/DOUBLE \"OPTIONAL\" ARGUMENT IN ORDER TO EVALUATE SOMETHING.\n')
parser.add_argument("-v", "--version", action="version",
version=f"{parser.prog} version 0.1.0.dev3")
return parser
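# Illustrative note (assumption: standard argparse behaviour, not original project documentation):
# because fromfile_prefix_chars='@' is set on the parser, any argument, including the subcommand's
# positionals, can be read from a plain-text file with one argument per line and passed as
# '@<filename>', e.g.
#
#   B_field single -point @my_single_case.txt
#
# where 'my_single_case.txt' (a hypothetical file) lists xp, yp, diam_cables, current and the
# phase/x/y triples, one value per line, in the order declared by the 'single' subparser below.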
def init_subparser_single(subparsers):
'''
Initialization of the subparser "single". Optional and positional arguments are listed in order of entry on the command line.
Parameters
-------------------
subparsers : argparse._SubParsersAction
A subparser action, it will be populated with a subparser instance.
Returns
-------------------
single_parser : argparse.ArgumentParser
An instance of a command line arguments parser (subparser in this specific case).
'''
# Single triad
single_parser = subparsers.add_parser('single', help='Calculate the magnetic induction B or the DPA for a single triad of cables',
description='''Positional arguments can be given either manually one by one or using a configuration file (prefix character: @).
Choose an optional argument in order to evaluate something.''')
# OPTIONAL ARGUMENTS
single_parser.add_argument('-point', '-p', action='store_true', help='Point estimate of the magnetic induction B in (xp, yp)')
single_parser.add_argument('-bidim', '-b', action='store_true', help='2D estimate of the magnetic induction B around (xp, yp)')
single_parser.add_argument('-graph', '-g', action='store_true', help='Graph of the 2D estimate of the magnetic induction B around (xp, yp)')
single_parser.add_argument('-dpa', '-d', type=float, nargs=1, metavar='lim_val', help='''Estimate of the DPA (distanza di prima approssimazione)
for the given configuration at \'lim_val\' microTesla. Suggested lim_values: 3, 10''')
single_parser.add_argument('-save', '-s', type=str, nargs=2, metavar=('dest', 'filename'), help='Save the output in \'dest\' repository, with \'filename\' denomination')
# POSITIONAL ARGUMENTS
single_parser.add_argument('xp', type=float, help='Abscissa (m) of the point of interest')
single_parser.add_argument('yp', type=float, help='Ordinate (m) of the point of interest')
single_parser.add_argument('diam_cables', type=float, help='Diameter (mm) of the cables used')
    single_parser.add_argument('current', type=int, help='''Current (A) - PCSN (Portata in corrente
                                in servizio nominale), i.e. current flowing inside the power line''')
single_parser.add_argument('ph_1_deg', type=float, help='Initial phase (deg) - cable 1')
single_parser.add_argument('x1', type=float, help='Abscissa (m) of the first cable (1)')
single_parser.add_argument('y1', type=float, help='Ordinate (m) of the first cable (1)')
single_parser.add_argument('ph_2_deg', type=float, help='Initial phase (deg) - cable 2')
single_parser.add_argument('x2', type=float, help='Abscissa (m) of the second cable (2)')
single_parser.add_argument('y2', type=float, help='Ordinate (m) of the second cable (2)')
single_parser.add_argument('ph_3_deg', type=float, help='Initial phase (deg) - cable 3')
single_parser.add_argument('x3', type=float, help='Abscissa (m) of the third cable (3)')
single_parser.add_argument('y3', type=float, help='Ordinate (m) of the third cable (3)')
return single_parser
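# Illustrative invocation (hypothetical values, assuming the package is exposed as the 'B_field'
# command named by prog above): a point estimate plus a 3 microTesla DPA for a single triad could
# be requested as
#
#   B_field single -point -dpa 3 0.0 1.0 31.5 870 0 -0.4 6.0 -120 0.4 6.0 120 0.0 7.5
#
# where the positionals follow the order declared above:
#   xp yp diam_cables current ph_1_deg x1 y1 ph_2_deg x2 y2 ph_3_deg x3 y3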
def init_subparser_double(subparsers):
'''
Initialization of the subparser "double". Optional and positional arguments are listed in order of entry on the command line.
Parameters
-------------------
subparsers : argparse._SubParsersAction
A subparser action, it will be populated with a subparser instance.
Returns
-------------------
double_parser : argparse.ArgumentParser
An instance of a command line arguments parser (subparser in this specific case).
'''
# Double triad
double_parser = subparsers.add_parser('double', help='Calculate the magnetic induction B or the DPA for a double triad of cables',
description='''Positional arguments can be given either manually one by one or using a configuration file (prefix character: @).
Choose an optional argument in order to evaluate something.''')
# OPTIONAL ARGUMENTS
double_parser.add_argument('-point', '-p', action='store_true', help='Point estimate of the magnetic induction B in (xp, yp)')
double_parser.add_argument('-bidim', '-b', action='store_true', help='2D estimate of the magnetic induction B around (xp, yp)')
double_parser.add_argument('-graph', '-g', action='store_true', help='Graph of the 2D estimate of the magnetic induction B around (xp, yp)')
double_parser.add_argument('-dpa', '-d', type=float, nargs=1, metavar='lim_val', help='''Estimate of the DPA (distanza di prima approssimazione)
for the given configuration at \'lim_val\' microTesla. Suggested lim_values: 3, 10''')
double_parser.add_argument('-save', '-s', type=str, nargs=2, metavar=('dest', 'filename'), help='Save the output in \'dest\' repository, with \'filename\' denomination')
# POSITIONAL ARGUMENTS
double_parser.add_argument('xp', type=float, help='Abscissa (m) of the point of interest')
double_parser.add_argument('yp', type=float, help='Ordinate (m) of the point of interest')
double_parser.add_argument('diam_cables', type=float, help='Diameter (mm) of the cables used')
    double_parser.add_argument('A_current', type=int, help='''Current (A) of triad A - PCSN (Portata
                                in corrente in servizio nominale), i.e. current flowing inside the power line A''')
double_parser.add_argument('A_ph_1_deg', type=float, help='Initial phase (deg) - cable 1A')
double_parser.add_argument('A_x1', type=float, help='Abscissa (m) of the first cable (1A)')
double_parser.add_argument('A_y1', type=float, help='Ordinate (m) of the first cable (1A)')
double_parser.add_argument('A_ph_2_deg', type=float, help='Initial phase (deg) - cable 2A')
double_parser.add_argument('A_x2', type=float, help='Abscissa (m) of the second cable (2A)')
double_parser.add_argument('A_y2', type=float, help='Ordinate (m) of the second cable (2A)')
double_parser.add_argument('A_ph_3_deg', type=float, help='Initial phase (deg) - cable 3A')
double_parser.add_argument('A_x3', type=float, help='Abscissa (m) of the third cable (3A)')
double_parser.add_argument('A_y3', type=float, help='Ordinate (m) of the third cable (3A)')
    double_parser.add_argument('B_current', type=int, help='''Current (A) of triad B - PCSN (Portata
                                in corrente in servizio nominale), i.e. current flowing inside the power line B''')
double_parser.add_argument('B_ph_1_deg', type=float, help='Initial phase (deg) - cable 1B')
double_parser.add_argument('B_x1', type=float, help='Abscissa (m) of the first cable (1B)')
double_parser.add_argument('B_y1', type=float, help='Ordinate (m) of the first cable (1B)')
double_parser.add_argument('B_ph_2_deg', type=float, help='Initial phase (deg) - cable 2B')
double_parser.add_argument('B_x2', type=float, help='Abscissa (m) of the second cable (2B)')
double_parser.add_argument('B_y2', type=float, help='Ordinate (m) of the second cable (2B)')
double_parser.add_argument('B_ph_3_deg', type=float, help='Initial phase (deg) - cable 3B')
double_parser.add_argument('B_x3', type=float, help='Abscissa (m) of the third cable (3B)')
double_parser.add_argument('B_y3', type=float, help='Ordinate (m) of the third cable (3B)')
return double_parser
def single_args_packaging(args):
'''
Packaging of the arguments for a single triad in the wanted fashion. The cables' diameter is transformed from millimeters to meters.
Parameters
-------------------
args : argparse.Namespace
Namespace object built up from attributes parsed out of the command line.
Returns
-------------------
xp, yp, diam_cables : float
Abscissa (m) and ordinate (m) of the point of interest.
        Cables' diameter (m).
current, cables_array : numpy.ndarray
Current (A) flowing inside the power line.
Array containing the phases (deg), abscissas (m) and ordinates (m) of the cables.
Notes
-------------------
    NaN values are used in order to maintain the overall numpy array structure similar to the double triad's one, thus exploiting the same "for" loops.
NaNs are preferable to zeros since in the visualization algorithm NaN values are not plotted automatically.
'''
xp, yp = args.xp, args.yp
diam_cables = args.diam_cables*0.001
current = np.array([args.current, np.nan])
cables_array = np.array([[[args.ph_1_deg, args.x1, args.y1],
[args.ph_2_deg, args.x2, args.y2],
[args.ph_3_deg, args.x3, args.y3]],
[[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]]])
return xp, yp, diam_cables, current, cables_array
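# Illustrative sketch (hypothetical values, not part of the original module): for a single triad the
# packaging above yields a (2, 3, 3) cables_array in which the second triad is entirely NaN, e.g.
#
#   current      -> array([870., nan])
#   cables_array -> array([[[   0. , -0.4,  6. ],
#                           [-120. ,  0.4,  6. ],
#                           [ 120. ,  0. ,  7.5]],
#                          [[  nan,  nan,  nan],
#                           [  nan,  nan,  nan],
#                           [  nan,  nan,  nan]]])
#
# This mirrors the double-triad layout so the same loops (and the NaN-skipping plots) can be
# reused, as described in the Notes of the docstring above.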
def double_args_packaging(args):
'''
Packaging of the arguments for a double triad in the wanted fashion. The cables' diameter is transformed from millimeters to meters.
Parameters
-------------------
args : argparse.Namespace
Namespace object built up from attributes parsed out of the command line.
Returns
-------------------
xp, yp, diam_cables : float
Abscissa (m) and ordinate (m) of the point of interest.
        Cables' diameter (m).
currents, cables_array : numpy.ndarray
Currents (A) flowing inside the power lines.
Array containing the phases (deg), abscissas (m) and ordinates (m) of the cables.
'''
xp, yp = args.xp, args.yp
diam_cables = args.diam_cables*0.001
currents = np.array([args.A_current, args.B_current])
cables_array = np.array([[[args.A_ph_1_deg, args.A_x1, args.A_y1],
[args.A_ph_2_deg, args.A_x2, args.A_y2],
[args.A_ph_3_deg, args.A_x3, args.A_y3]],
[[args.B_ph_1_deg, args.B_x1, args.B_y1],
[args.B_ph_2_deg, args.B_x2, args.B_y2],
[args.B_ph_3_deg, args.B_x3, args.B_y3]]])
return xp, yp, diam_cables, currents, cables_array
def main(argv=None):
'''
*COMMAND LINE INTERFACE*
The function parses the arguments passed by the user through the command line
providing two options: single or double power line, i.e. three or six cables.
Depending on the option selected, data are packed in numpy arrays of different
fashion and the corresponding calculation functions are called.
    The results are:
- point : the point estimate of the magnetic induction B in (xp, yp);
- bidim : the 2D estimate of the magnetic induction B around (xp, yp);
- graph : graphical representation of the 2D estimate of the magnetic induction B around (xp, yp);
- dpa : estimate of the DPA (distanza di prima approssimazione) for the given configuration at \'lim_val\' microTesla. Suggested lim_values: 3, 10.
    With the 'save' optional argument it is possible to save the output to a file (respectively: point, bidim, dpa in .txt and graph in .jpg)
'''
parser = init_parser()
subparsers = parser.add_subparsers(help='Possible cable configurations', dest='subparser')
in hparams:{}'.format(PREFIX, hparams['act_func_list']))
self.act_func_list = hparams['act_func_list']
if self.act_func_list is None:
self.act_func_list = np.repeat(NNModel.DEFAULT_ACT_FUNC_KEY, [self.n_layer - 1])
print('{}Use act_func_list with default value:{}'.format(PREFIX, self.act_func_list))
self.act_func_ref_list = self.set_act_func_ref_list(self.act_func_list, self.n_layer)
print('{}act_func_ref_list is set :{}'.format(PREFIX, self.act_func_ref_list))
# About minibatch operation
self.set_evaluate_in_minibatch(hparams)
# About sub model
self.set_hparams_on_sub_model(hparams)
        # About ONNX export
self.set_export_to_onnx(hparams)
self.test_only_mode = False
if hparams and 'test_only_mode' in hparams.keys():
print('{}Use test_only_mode in hparams:{}'.format(PREFIX, hparams['test_only_mode']))
self.test_only_mode = hparams['test_only_mode']
else:
print('{}TODO Use test_only_mode with default value:{}'.format(PREFIX, self.test_only_mode))
# whether has ResNet or not
self.has_res_net = False
if hparams and 'has_res_net' in hparams.keys():
print('{}Use has_res_net in hparams:{}'.format(PREFIX, hparams['has_res_net']))
self.has_res_net = hparams['has_res_net']
else:
print('{}Use has_res_net with default value:{}'.format(PREFIX, self.has_res_net))
# about batch normalization
self.has_batch_norm = True
if hparams and 'has_batch_norm' in hparams.keys():
print('{}Use has_batch_norm in hparams:{}'.format(PREFIX, hparams['has_batch_norm']))
self.has_batch_norm = hparams['has_batch_norm']
else:
print('{}TODO Use has_batch_norm with default value:{}'.format(PREFIX, self.has_batch_norm))
if self.has_batch_norm:
self.bn_decay = NNModel.DEFAULT_BN_DECAY
if hparams and 'bn_decay' in hparams.keys():
print('{}Use bn_decay in hparams:{}'.format(PREFIX, hparams['bn_decay']))
self.bn_decay = hparams['bn_decay']
else:
print('{}TODO Use bn_decay with default value:{}'.format(PREFIX, self.bn_decay))
self.bn_eps = NNModel.DEFAULT_BN_ESP
if hparams and 'bn_eps' in hparams.keys():
print('{}Use bn_eps in hparams:{}'.format(PREFIX, hparams['bn_eps']))
self.bn_eps = hparams['bn_eps']
else:
print('{}TODO Use bn_eps with default value:{}'.format(PREFIX, self.bn_eps))
self.annotation_col_names = None
if hparams and 'annotation_col_names' in hparams.keys():
print('{}Use annotation_col_names in hparams:{}'.format(PREFIX, hparams['annotation_col_names']))
self.annotation_col_names = hparams['annotation_col_names']
self.annotation_col_size = 0
if self.annotation_col_names is not None:
self.annotation_col_size = len(self.annotation_col_names)
# about mask_rate
self.mask_rate = None
if hparams and 'mask_rate' in hparams.keys():
print('{}Use mask_rate in hparams:{}'.format(PREFIX, hparams['mask_rate']))
self.mask_rate = hparams['mask_rate']
if self.mask_rate is not None:
try:
self.mask_rate = float(self.mask_rate)
except ValueError:
print('{}mask_rate is not float type. reset with None'.format(PREFIX))
self.mask_rate = None
# output_data_names
if hparams and 'output_data_names' in hparams.keys():
print('{}Use output_data_names in hparams:{}'.format(PREFIX, hparams['output_data_names']))
self.output_data_names = hparams['output_data_names']
if self.output_data_names is not None:
try:
if not isinstance(self.output_data_names, list):
raise ValueError
print('output_data_names size:{}'.format(len(self.output_data_names)))
except ValueError:
print('{}output_data_names is not list type. reset with None'.format(PREFIX))
self.output_data_names = None
self.restore_var_name_list = None
if hparams and 'restore_var_name_list' in hparams.keys():
print('{}Use restore_var_name_list in hparams:{}'.format(PREFIX, hparams['restore_var_name_list']))
self.restore_var_name_list = hparams['restore_var_name_list']
self.untrainable_var_name_list = None
if hparams and 'untrainable_var_name_list' in hparams.keys():
print('{}Use untrainable_var_name_list in hparams:{}'.format(PREFIX, hparams['untrainable_var_name_list']))
self.untrainable_var_name_list = hparams['untrainable_var_name_list']
# plot settings
self.plot_x_label = None
if hparams and 'plot_x_label' in hparams.keys():
print('{}Use plot_x_label in hparams:{}'.format(PREFIX, hparams['plot_x_label']))
self.plot_x_label = hparams['plot_x_label']
self.plot_y_label = None
if hparams and 'plot_y_label' in hparams.keys():
print('{}Use plot_y_label in hparams:{}'.format(PREFIX, hparams['plot_y_label']))
self.plot_y_label = hparams['plot_y_label']
self.plot_x_data_name_in_annotation = None
if hparams and 'plot_x_data_name_in_annotation' in hparams.keys():
print('{}Use plot_x_data_name_in_annotation in hparams:{}'.format(PREFIX, hparams['plot_x_data_name_in_annotation']))
self.plot_x_data_name_in_annotation = hparams['plot_x_data_name_in_annotation']
self.plot_group_data_name_in_annotation = None
if hparams and 'plot_group_data_name_in_annotation' in hparams.keys():
print('{}Use plot_group_data_name_in_annotation in hparams:{}'.format(PREFIX, hparams['plot_group_data_name_in_annotation']))
self.plot_group_data_name_in_annotation = hparams['plot_group_data_name_in_annotation']
self.plot_x_range = None
if hparams and 'plot_x_range' in hparams.keys():
print('{}Use plot_x_range in hparams:{}'.format(PREFIX, hparams['plot_x_range']))
self.plot_x_range = hparams['plot_x_range']
self.plot_y_range = None
if hparams and 'plot_y_range' in hparams.keys():
print('{}Use plot_y_range in hparams:{}'.format(PREFIX, hparams['plot_y_range']))
self.plot_y_range = hparams['plot_y_range']
self.plot_title = None
if hparams and 'plot_title' in hparams.keys():
print('{}Use plot_title in hparams:{}'.format(PREFIX, hparams['plot_title']))
self.plot_title = hparams['plot_title']
self.plot_errors = None
if hparams and 'plot_errors' in hparams.keys():
print('{}Use plot_errors in hparams:{}'.format(PREFIX, hparams['plot_errors']))
self.plot_errors = hparams['plot_errors']
self.plot_animation = False
if hparams and 'plot_animation' in hparams.keys():
print('{}Use plot_animation in hparams:{}'.format(PREFIX, hparams['plot_animation']))
self.plot_animation = hparams['plot_animation']
if self.plot_animation is None:
self.plot_animation = False
print('{}Use plot_animation with default value:{}'.format(PREFIX, self.plot_animation))
self.calc_cc_errors = False
if hparams and 'calc_cc_errors' in hparams.keys():
print('{}Use calc_cc_errors in hparams:{}'.format(PREFIX, hparams['calc_cc_errors']))
self.calc_cc_errors = hparams['calc_cc_errors']
if self.calc_cc_errors is None:
self.calc_cc_errors = False
print('{}Use calc_cc_errors with default value:{}'.format(PREFIX, self.calc_cc_errors))
self.op_errors = None
if hparams and 'op_errors' in hparams.keys():
print('{}Use op_errors in hparams:{}'.format(PREFIX, hparams['op_errors']))
self.op_errors = hparams['op_errors']
# rank_boundary_list
self.rank_boundary_list = None
if hparams and 'rank_boundary_list' in hparams.keys():
print('{}Use rank_boundary_list in hparams:{}'.format(PREFIX, hparams['rank_boundary_list']))
self.rank_boundary_list = hparams['rank_boundary_list']
if self.rank_boundary_list is not None:
# check the members of rank_boundary_list
len_of_rank_boundary_list = len(self.rank_boundary_list)
if len_of_rank_boundary_list < 1:
self.rank_boundary_list = None
            for rank_boundary in self.rank_boundary_list or []:
try:
assert len(rank_boundary) > 1
lower = rank_boundary[0]
upper = rank_boundary[1]
                    print('{}rank_boundary lower:{}, upper:{}'.format(PREFIX, lower, upper))
except Exception as e:
print('{}No rank_boundary_list is set because of error {} on invalid parameter:{}'.format(PREFIX, e, rank_boundary))
else:
print('{}No rank_boundary_list is set'.format(PREFIX))
# cloud settings
self.cloud_root = None
if hparams and 'cloud_root' in hparams.keys():
print('{}Use cloud_root in hparams:{}'.format(PREFIX, hparams['cloud_root']))
self.cloud_root = hparams['cloud_root']
self.prioritize_cloud = False
if hparams and 'prioritize_cloud' in hparams.keys():
print('{}Use prioritize_cloud in hparams:{}'.format(PREFIX, hparams['prioritize_cloud']))
self.prioritize_cloud = hparams['prioritize_cloud']
if self.prioritize_cloud is None:
self.prioritize_cloud = False
print('{}Use prioritize_cloud with default value:{}'.format(PREFIX, self.prioritize_cloud))
# local setting
self.save_root_dir = '/var/tensorflow/tsp/'
if hparams and 'save_root_dir' in hparams.keys():
print('{}Use save_root_dir in hparams:{}'.format(PREFIX, hparams['save_root_dir']))
self.save_root_dir = hparams['save_root_dir']
else:
print('{}TODO Use save_root_dir with default value'.format(PREFIX))
# check init model
self.sess = tf.InteractiveSession()
self.init_model_path = None
if hparams and 'init_model_path' in hparams.keys():
print('{}Use init_model_path in hparams:{}'.format(PREFIX, hparams['init_model_path']))
self.init_model_path = hparams['init_model_path']
# set output_classes in CLASSIFICATION model
self.output_classes = None
if hparams and 'output_classes' in hparams.keys():
print('{}Use output_classes in hparams:{}'.format(PREFIX, hparams['output_classes']))
self.output_classes = hparams['output_classes']
# if output_classes is not set in CLASSIFICATION model, try to read from init_model_path
if self.init_model_path is not None and self.model_type == 'CLASSIFICATION':
self.output_classes = self.get_output_classes_from_model(self.init_model_path)
hparams['output_classes'] = self.output_classes
self.log_dir_path = log_dir_path
def prepare_model(self):
last_time = time.time()
self.result_sum = []
self.sess = tf.InteractiveSession()
self.define_model()
print('---------- time:{} DONE define_model'.format(time.time() - last_time))
last_time = time.time()
self.saver = tf.train.Saver(var_list=None, max_to_keep=None)
self.global_iter = 0
self.sess.run(tf.global_variables_initializer())
self.restore_model()
print('---------- time:{} DONE init model'.format(time.time() - last_time))
last_time = time.time()
def auto_set_model_parameter(self):
print('TODO auto_set_model_parameter')
self.can_not_generate_input_output_data = None
self.generate_data_set()
self.input_width = self.data_set.input_ts_width
self.col_size = self.data_set.col_size
self.output_classes = self.data_set.output_classes
# info_dim_size_list = []
print('DONE auto_set_model_parameter')
return True
def generate_data_set(self):
self.data_set = TSDataSet(debug_mode=self.debug_mode, prediction_mode=self.prediction_mode, hparams=self.hparams)
self.data_set.generate_input_output_data()
def restore_model(self):
'''
Class method to restore model
(No need to set args, use hparams to restore model)
:return:
'''
# restore model
if self.init_model_path is not None:
print('[restore_model]restore model from {}'.format(self.init_model_path))
has_restored = self.restore(self.init_model_path, self.restore_var_name_list)
print('[restore_model]has_restored:', has_restored)
# if it has not been restored, then the model will be initialized with Prob dist.
else:
print('[restore_model]init_model_path is empty. No need to restore')
# Set optimizer again when trainable_variables is changed
if self.untrainable_var_name_list is not None:
self.trainable_variables = self.remove_trainable(self.untrainable_var_name_list)
self.set_optimizer()
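    # Illustrative note (assumed usage, not part of the original class): restore_model() is driven
    # entirely by hparams, so a partial restore of only selected variables, frozen afterwards, could
    # be configured along these lines (path and variable names below are hypothetical):
    #
    #   hparams = {
    #       'init_model_path': 's3://my-bucket/models/model.ckpt-1000',
    #       'restore_var_name_list': ['conv1/kernel', 'conv1/bias'],
    #       'untrainable_var_name_list': ['conv1/kernel', 'conv1/bias'],
    #   }
    #
    # With these keys set, restore() loads only the listed variables (':0' is appended to names when
    # missing) and remove_trainable() excludes them from the trainable set before set_optimizer()
    # is called again.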
def restore(self, init_model_path, var_name_list=None):
from smalltrain.model.operation import is_s3_path, download_to_local, upload_to_cloud
if init_model_path is None or len(init_model_path) < 1 or os.path.isfile(init_model_path):
print('[restore]init_model_path is empty. No need to restore')
return False
if var_name_list is not None:
trainable_variables = self.get_trainable_variables()
var_name_list_to_check = [
name if (len(name.split(':')) > 1 and name.split(':')[1] == '0') else '{}:0'.format(name) for name in
var_name_list]
var_to_restore = [var for var in trainable_variables if (var.name in var_name_list_to_check)]
print('var_name_list:{}, var_to_load:{}'.format(var_name_list, var_to_restore))
else:
var_to_restore = None
self.saver = tf.train.Saver(var_list=var_to_restore, max_to_keep=None)
# Initialize all variables
print('[restore]Initialize all variables')
self.sess.run(tf.global_variables_initializer())
# Restore by saver
print('[restore]Restore from init_model_path:{}'.format(init_model_path))
local_init_model_path = init_model_path
if self.prioritize_cloud:
# download from S3 if the "init_model_path" is S3 path
if is_s3_path(init_model_path):
_paths, _global_iter_got_from_path = get_tf_model_file_paths(init_model_path)
for _path in _paths:
local_init_model_path = download_to_local(path=_path, work_dir_path='/var/tmp/tsp/')
local_init_model_path = local_init_model_path.split('.ckpt')[0] + '.ckpt'
if _global_iter_got_from_path is not None:
local_init_model_path = local_init_model_path + '-' + str(_global_iter_got_from_path)
else:
print('[restore]Restore from local:{}'.format(init_model_path))
print('[restore]Restore from local_init_model_path:{}'.format(local_init_model_path))
if local_init_model_path is None or len(local_init_model_path) < 1 or os.path.isfile(local_init_model_path):
print('[restore]local_init_model_path is empty. Can not restore')
return False
self.saver.restore(self.sess, local_init_model_path)
print('[restore]Set var_name_list untrainable')
# Reset saver in other to save all variables
self.saver = tf.train.Saver(var_list=None, max_to_keep=None)
return True
def remove_trainable(self, var_name_list, current_trainable_variables=None):
if current_trainable_variables is None: current_trainable_variables = self.get_trainable_variables()
print('[remove_trainable]remove from current_trainable_variables: {}'.format(current_trainable_variables))
var_name_list_to_check = [
name if (len(name.split(':')) > 1 and name.split(':')[1] == '0') else '{}:0'.format(name) for name in
var_name_list]
print('[remove_trainable]remove var_name_list_to_check: {}'.format(var_name_list_to_check))
trainable_variables = [var for var in current_trainable_variables if (var.name not in var_name_list_to_check)]
        print('[remove_trainable]trainable_variables: {}'.format(trainable_variables))
return trainable_variables
def get_trainable_variables(self):
all_collection_keys = tf.get_default_graph().get_all_collection_keys()
# print('all_collection_keys:{}'.format(all_collection_keys))
trainable_variables = tf.get_default_graph().get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES)
# print('trainable_variables:{}'.format(trainable_variables))
return trainable_variables
def get_output_classes_from_model(self, init_model_path):
from smalltrain.model.operation import is_s3_path, download_to_local, upload_to_cloud
print('[get_output_classes_from_model]Restore from init_model_path:{}'.format(init_model_path))
local_init_model_path = init_model_path
if self.prioritize_cloud:
# download from S3 if the "init_model_path" is S3 path
if is_s3_path(init_model_path):
_paths, _global_iter_got_from_path = get_tf_model_file_paths(init_model_path)
for _path in _paths:
local_init_model_path = download_to_local(path=_path, work_dir_path='/var/tmp/tsp/')
local_init_model_path = local_init_model_path.split('.ckpt')[0] + '.ckpt'
if _global_iter_got_from_path is not None:
local_init_model_path = local_init_model_path + '-' + str(_global_iter_got_from_path)
else:
print('[get_output_classes_from_model]Check local:{}'.format(init_model_path))
print('[get_output_classes_from_model]Check local_init_model_path:{}'.format(local_init_model_path))
if local_init_model_path is None or len(local_init_model_path) < 1 or os.path.isfile(local_init_model_path):
print('[get_output_classes_from_model]local_init_model_path is empty. output_classes set None')
self.output_classes = None
return None
meta_file_path = '{}.meta'.format(local_init_model_path)
_saver = tf.train.import_meta_graph(meta_file_path)
_saver.restore(self.sess, local_init_model_path)
        # get output_classes from the restored model graph
# src/Products/CMFCore/tests/test_CachingPolicyManager.py
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Unit tests for CachingPolicyManager module.
"""
import os
import unittest
from os.path import join as path_join
from AccessControl.SecurityManagement import newSecurityManager
from Acquisition import Implicit
from DateTime.DateTime import DateTime
from OFS.Cache import Cacheable
from zope.component import getSiteManager
from zope.datetime import rfc1123_date
from zope.interface.verify import verifyClass
from ..FSDTMLMethod import FSDTMLMethod
from ..FSPageTemplate import FSPageTemplate
from ..interfaces import IMembershipTool
from ..interfaces import ITypesTool
from ..testing import FunctionalZCMLLayer
from ..testing import TraversingZCMLLayer
from ..utils import base64_encode
from .base.dummy import DummyContent
from .base.dummy import DummySite
from .base.dummy import DummyTool
from .base.testcase import FSDVTest
from .base.testcase import SecurityTest
from .base.testcase import TransactionalTest
ACCLARK = DateTime('2001/01/01')
portal_owner = b'portal_owner'
class DummyContent2:
__allow_access_to_unprotected_subobjects__ = 1
def __init__(self, modified):
self.modified = modified
def Type(self):
return 'Dummy'
def modified(self):
return self.modified
class CacheableDummyContent(Implicit, Cacheable):
__allow_access_to_unprotected_subobjects__ = 1
def __init__(self, id):
self.id = id
self.modified = DateTime()
def getId(self):
""" """
return self.id
def modified(self):
return self.modified
def __call__(self):
""" """
if self.ZCacheable_isCachingEnabled():
result = self.ZCacheable_get(default=None)
if result is not None:
# We will always get None from RAMCacheManager and HTTP
# Accelerated Cache Manager but we will get
# something implementing the IStreamIterator interface
# from a "FileCacheManager"
return result
self.ZCacheable_set(None)
class DummyView(CacheableDummyContent):
meta_type = 'DTML Method'
class CachingPolicyTests(unittest.TestCase):
layer = TraversingZCMLLayer
def _makePolicy(self, policy_id, **kw):
from ..CachingPolicyManager import CachingPolicy
return CachingPolicy(policy_id, **kw)
def _makeContext(self, **kw):
from ..CachingPolicyManager import createCPContext
return createCPContext(DummyContent2(self._epoch),
'foo_view', kw, self._epoch)
def setUp(self):
self._epoch = DateTime(0)
getSiteManager().registerUtility(DummyTool(), IMembershipTool)
def test_interfaces(self):
from ..CachingPolicyManager import CachingPolicy
from ..interfaces import ICachingPolicy
verifyClass(ICachingPolicy, CachingPolicy)
def test_empty(self):
policy = self._makePolicy('empty')
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0][0], 'Last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
def test_noPassPredicate(self):
policy = self._makePolicy('noPassPredicate', predicate='nothing')
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 0)
def test_typePredicate(self):
policy = self._makePolicy('typePredicate',
predicate='python:object.Type() == "Dummy"')
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0][0], 'Last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
def test_typePredicateMiss(self):
policy = self._makePolicy('typePredicate',
predicate='python:object.Type()=="Foolish"')
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 0)
def test_viewPredicate(self):
policy = self._makePolicy('viewPredicate',
predicate='python:view == "foo_view"')
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0][0], 'Last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
def test_viewPredicateMiss(self):
policy = self._makePolicy('viewPredicateMiss',
predicate='python:view == "bar_view"')
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 0)
def test_kwPredicate(self):
policy = self._makePolicy('kwPredicate',
predicate='python:"foo" in keywords')
context = self._makeContext(foo=1)
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0][0], 'Last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
def test_kwPredicateMiss(self):
policy = self._makePolicy('kwPredicateMiss',
predicate='python:"foo" in keywords')
context = self._makeContext(bar=1)
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 0)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 0)
def test_mtimeFunc(self):
policy = self._makePolicy('mtimeFunc', mtime_func='string:2001/01/01')
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0][0], 'Last-modified')
self.assertEqual(headers[0][1], rfc1123_date(ACCLARK.timeTime()))
def test_mtimeFuncNone(self):
policy = self._makePolicy('mtimeFuncNone', mtime_func='nothing')
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 0)
def test_maxAge(self):
policy = self._makePolicy('aged', max_age_secs=86400)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'expires')
self.assertEqual(headers[1][1],
rfc1123_date((self._epoch + 1).timeTime()))
self.assertEqual(headers[2][0].lower(), 'cache-control')
self.assertEqual(headers[2][1], 'max-age=86400')
def test_sMaxAge(self):
policy = self._makePolicy('s_aged', s_max_age_secs=86400)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 2)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'cache-control')
self.assertEqual(headers[1][1], 's-maxage=86400')
self.assertEqual(policy.getSMaxAgeSecs(), 86400)
def test_noCache(self):
policy = self._makePolicy('noCache', no_cache=1)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'pragma')
self.assertEqual(headers[1][1], 'no-cache')
self.assertEqual(headers[2][0].lower(), 'cache-control')
self.assertEqual(headers[2][1], 'no-cache')
def test_noStore(self):
policy = self._makePolicy('noStore', no_store=1)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 2)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'cache-control')
self.assertEqual(headers[1][1], 'no-store')
def test_mustRevalidate(self):
policy = self._makePolicy('mustRevalidate', must_revalidate=1)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 2)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'cache-control')
self.assertEqual(headers[1][1], 'must-revalidate')
def test_proxyRevalidate(self):
policy = self._makePolicy('proxyRevalidate', proxy_revalidate=1)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 2)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'cache-control')
self.assertEqual(headers[1][1], 'proxy-revalidate')
self.assertEqual(policy.getProxyRevalidate(), 1)
def test_public(self):
policy = self._makePolicy('public', public=1)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 2)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'cache-control')
self.assertEqual(headers[1][1], 'public')
self.assertEqual(policy.getPublic(), 1)
def test_private(self):
policy = self._makePolicy('private', private=1)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 2)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'cache-control')
self.assertEqual(headers[1][1], 'private')
self.assertEqual(policy.getPrivate(), 1)
def test_noTransform(self):
policy = self._makePolicy('noTransform', no_transform=1)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 2)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'cache-control')
self.assertEqual(headers[1][1], 'no-transform')
self.assertEqual(policy.getNoTransform(), 1)
def test_lastModified(self):
policy = self._makePolicy('lastModified', last_modified=0)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 0)
self.assertEqual(policy.getLastModified(), 0)
def test_preCheck(self):
policy = self._makePolicy('preCheck', pre_check=1)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 2)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'cache-control')
self.assertEqual(headers[1][1], 'pre-check=1')
self.assertEqual(policy.getPreCheck(), 1)
self.assertEqual(policy.getPostCheck(), None)
def test_postCheck(self):
policy = self._makePolicy('postCheck', post_check=1)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 2)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'cache-control')
self.assertEqual(headers[1][1], 'post-check=1')
self.assertEqual(policy.getPostCheck(), 1)
self.assertEqual(policy.getPreCheck(), None)
def test_ETag(self):
# With an empty etag_func, no ETag should be produced
policy = self._makePolicy('ETag', etag_func='')
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
policy = self._makePolicy('ETag', etag_func='string:foo')
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 2)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'etag')
self.assertEqual(headers[1][1], 'foo')
def test_combined(self):
policy = self._makePolicy('noStore', no_cache=1, no_store=1)
context = self._makeContext()
headers = policy.getHeaders(context)
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'pragma')
self.assertEqual(headers[1][1], 'no-cache')
self.assertEqual(headers[2][0].lower(), 'cache-control')
self.assertEqual(headers[2][1], 'no-cache, no-store')
class CachingPolicyManagerTests(unittest.TestCase):
layer = TraversingZCMLLayer
def _getTargetClass(self):
from ..CachingPolicyManager import CachingPolicyManager
return CachingPolicyManager
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def setUp(self):
self._epoch = DateTime()
getSiteManager().registerUtility(DummyTool(), IMembershipTool)
def assertEqualDelta(self, lhs, rhs, delta):
self.assertTrue(abs(lhs - rhs) <= delta)
def test_interfaces(self):
from ..CachingPolicyManager import CachingPolicyManager
from ..interfaces import ICachingPolicyManager
verifyClass(ICachingPolicyManager, CachingPolicyManager)
def test_empty(self):
mgr = self._makeOne()
self.assertEqual(len(mgr.listPolicies()), 0)
headers = mgr.getHTTPCachingHeaders(content=DummyContent2(self._epoch),
view_method='foo_view',
keywords={},
time=self._epoch)
self.assertEqual(len(headers), 0)
self.assertRaises(KeyError, mgr._updatePolicy,
'xyzzy', None, None, None, None, None, None, '',
'', None, None, None, None, None)
self.assertRaises(KeyError, mgr._removePolicy, 'xyzzy')
self.assertRaises(KeyError, mgr._reorderPolicy, 'xyzzy', -1)
def test_addAndUpdatePolicy(self):
mgr = self._makeOne()
mgr.addPolicy('first', 'python:1', 'mtime', 1, 0, 1, 0, 'vary',
'etag', None, 2, 1, 0, 1, 0, 1, 0, 2, 3)
p = mgr._policies['first']
self.assertEqual(p.getPolicyId(), 'first')
self.assertEqual(p.getPredicate(), 'python:1')
self.assertEqual(p.getMTimeFunc(), 'mtime')
self.assertEqual(p.getMaxAgeSecs(), 1)
self.assertEqual(p.getNoCache(), 0)
self.assertEqual(p.getNoStore(), 1)
self.assertEqual(p.getMustRevalidate(), 0)
self.assertEqual(p.getVary(), 'vary')
self.assertEqual(p.getETagFunc(), 'etag')
self.assertEqual(p.getSMaxAgeSecs(), 2)
self.assertEqual(p.getProxyRevalidate(), 1)
self.assertEqual(p.getPublic(), 0)
self.assertEqual(p.getPrivate(), 1)
self.assertEqual(p.getNoTransform(), 0)
self.assertEqual(p.getEnable304s(), 1)
self.assertEqual(p.getLastModified(), 0)
self.assertEqual(p.getPreCheck(), 2)
self.assertEqual(p.getPostCheck(), 3)
mgr.updatePolicy('first', 'python:0', 'mtime2', 2, 1, 0, 1, 'vary2',
'etag2', None, 1, 0, 1, 0, 1, 0, 1, 3, 2)
p = mgr._policies['first']
self.assertEqual(p.getPolicyId(), 'first')
self.assertEqual(p.getPredicate(), 'python:0')
self.assertEqual(p.getMTimeFunc(), 'mtime2')
self.assertEqual(p.getMaxAgeSecs(), 2)
self.assertEqual(p.getNoCache(), 1)
self.assertEqual(p.getNoStore(), 0)
self.assertEqual(p.getMustRevalidate(), 1)
self.assertEqual(p.getVary(), 'vary2')
self.assertEqual(p.getETagFunc(), 'etag2')
self.assertEqual(p.getSMaxAgeSecs(), 1)
self.assertEqual(p.getProxyRevalidate(), 0)
self.assertEqual(p.getPublic(), 1)
self.assertEqual(p.getPrivate(), 0)
self.assertEqual(p.getNoTransform(), 1)
self.assertEqual(p.getEnable304s(), 0)
self.assertEqual(p.getLastModified(), 1)
self.assertEqual(p.getPreCheck(), 3)
self.assertEqual(p.getPostCheck(), 2)
def test_reorder(self):
mgr = self._makeOne()
policy_ids = ('foo', 'bar', 'baz', 'qux')
for policy_id in policy_ids:
mgr._addPolicy(policy_id,
'python:"%s" in keywords' % policy_id,
None, 0, 0, 0, 0, '', '')
ids = tuple([x[0] for x in mgr.listPolicies()])
self.assertEqual(ids, policy_ids)
mgr._reorderPolicy('bar', 3)
ids = tuple([x[0] for x in mgr.listPolicies()])
self.assertEqual(ids, ('foo', 'baz', 'qux', 'bar'))
def _makeOneWithPolicies(self):
mgr = self._makeOne()
policy_tuples = (('foo', None),
('bar', 0),
('baz', 3600),
('qux', 86400))
for policy_id, max_age_secs in policy_tuples:
mgr._addPolicy(policy_id,
'python:"%s" in keywords' % policy_id,
None, max_age_secs, 0, 0, 0, '', '')
return mgr
def test_lookupNoMatch(self):
mgr = self._makeOneWithPolicies()
headers = mgr.getHTTPCachingHeaders(content=DummyContent2(self._epoch),
view_method='foo_view',
keywords={},
time=self._epoch)
self.assertEqual(len(headers), 0)
def test_lookupMatchFoo(self):
mgr = self._makeOneWithPolicies()
headers = mgr.getHTTPCachingHeaders(content=DummyContent2(self._epoch),
view_method='foo_view',
keywords={'foo': 1},
time=self._epoch)
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
def test_lookupMatchBar(self):
mgr = self._makeOneWithPolicies()
headers = mgr.getHTTPCachingHeaders(content=DummyContent2(self._epoch),
view_method='foo_view',
keywords={'bar': 1},
time=self._epoch)
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'expires')
self.assertEqual(headers[1][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[2][0].lower(), 'cache-control')
self.assertEqual(headers[2][1], 'max-age=0')
def test_lookupMatchBaz(self):
mgr = self._makeOneWithPolicies()
headers = mgr.getHTTPCachingHeaders(content=DummyContent2(self._epoch),
view_method='foo_view',
keywords={'baz': 1},
time=self._epoch)
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'expires')
exp_time = DateTime(headers[1][1])
target = self._epoch + (1.0 / 24.0)
self.assertEqualDelta(exp_time, target, 0.01)
self.assertEqual(headers[2][0].lower(), 'cache-control')
self.assertEqual(headers[2][1], 'max-age=3600')
def test_lookupMatchQux(self):
mgr = self._makeOneWithPolicies()
headers = mgr.getHTTPCachingHeaders(content=DummyContent2(self._epoch),
view_method='foo_view',
keywords={'qux': 1},
time=self._epoch)
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0][0].lower(), 'last-modified')
self.assertEqual(headers[0][1], rfc1123_date(self._epoch.timeTime()))
self.assertEqual(headers[1][0].lower(), 'expires')
exp_time = DateTime(headers[1][1])
target = self._epoch + 1.0
self.assertEqualDelta(exp_time, target, 0.01)
self.assertEqual(headers[2][0].lower(), 'cache-control')
self.assertEqual(headers[2][1], 'max-age=86400')
class CachingPolicyManager304Tests(SecurityTest, FSDVTest):
layer = TraversingZCMLLayer
def _getTargetClass(self):
from ..CachingPolicyManager import CachingPolicyManager
return CachingPolicyManager
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def setUp(self):
from ..interfaces import ICachingPolicyManager
SecurityTest.setUp(self)
FSDVTest.setUp(self)
now = DateTime()
        # Create a fake portal and the tools it needs
temp_output = []
for i in range(self.num_inputs):
if isinstance(normalized_variables[i][record], list):
for num in normalized_variables[i][record]:
temp_input.append(float(num))
else:
temp_input.append(normalized_variables[i][record])
for i in range(self.num_outputs):
if isinstance(normalized_variables[i + self.num_inputs][record], list):
for num in normalized_variables[i + self.num_inputs][record]:
temp_output.append(float(num))
else:
temp_output.append(normalized_variables[i + self.num_inputs][record])
normalized_data.append([temp_input, temp_output])
record += 1
self.normalized_data = normalized_data
# if self.parent_study.using_numpy:
# temp_array = []
# for pair in normalized_data:
# temp_array.append(map(lambda x: np.array(x, ndmin=2).T, pair))
#
# self.np_normalized_data = np.array(temp_array)
self.save_normalized_data_to_file()
        # to make the order of data in a random sequence
# random.shuffle(self.normalized_data)
# print self.get_mean_row()
def read_file(self):
# train = np.array(list(csv.reader(open(self.source_data_file, "rb"), delimiter=','))) # .astype('float')
"""
reading the data file
@return: list of lists, each sub list is a data line
"""
tmp = []
try:
with open(self.source_data_file, 'rb') as csvfile:
spam_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spam_reader:
# tmp.append(', '.join(row))
tmp.append(row)
        except IOError:
            print '\nFile not found.\nThe file named {} was not found in the root directory, ' \
                  'please make sure it is located at the correct location and try again.'.format(self.source_data_file)
exit()
def read_cell(cel):
"""
Read data and specify if string or numeric
@param cel: data cell
            @return: float or string value
"""
            try:  # if it is a number
                return float(cel)
            except ValueError:  # otherwise return a trimmed string (no spaces on either side)
                return cel.strip()
# creating titles and separating data from them
var_count = len(tmp[0])
self.num_inputs = var_count - self.num_outputs
if self.has_titles and self.has_briefs:
# remove white spaces if any (trim)
tmp[0] = map(lambda x: x.strip(), tmp[0])
tmp[1] = map(lambda x: x.strip(), tmp[1])
self.titles = tmp[0]
self.briefs = tmp[1]
tmp = tmp[2:]
elif self.has_titles:
# if it only has full titles, we will initiate a brief title
tmp[0] = map(lambda x: x.strip(), tmp[0])
self.titles = tmp[0]
self.briefs = ['In' + str(x) if x < self.num_inputs
else 'Ot' + str(x - self.num_inputs) for x in range(var_count)]
tmp = tmp[1:]
elif self.has_briefs:
# if it only has briefs we will consider them as full titles as well
tmp[0] = map(lambda x: x.strip(), tmp[0])
self.briefs = tmp[0]
self.titles = tmp[0]
tmp = tmp[1:]
else: # no titles provided
self.titles = ['Input variable {' + str(x + 1) + '}' if x < self.num_inputs
else 'Output variable {' + str(x - self.num_inputs + 1) + '}' for x in range(var_count)]
self.briefs = ['In' + str(x + 1) if x < self.num_inputs
else 'Ot' + str(x - self.num_inputs + 1) for x in range(var_count)]
data_ok = []
for line in tmp:
lll = []
for cell in line:
lll.append(read_cell(cell))
data_ok.append(lll)
return data_ok
def save_normalized_data_to_file(self, clear_file=True, file_name='NormalizedData.csv'):
"""
Save normalized data to a text file
        @param clear_file: If True, then the file will be cleaned before appending current data,
otherwise, it will append current data to previous data
@param file_name: the saving file name, Default value is 'NormalizedData.csv'
"""
file_name = self.parent_study.new_folder_path + '\\' + file_name
if clear_file:
open(file_name, "w").close()
file_ann = open(file_name, "a")
for line in self.normalized_data:
clean_line = str(line)
clean_line = clean_line.replace('[', '')
clean_line = clean_line.replace(']', '')
clean_line = clean_line.replace("'", "")
file_ann.writelines(clean_line + '\n')
file_ann.close()
def get_normalized_structure(self):
"""
returns the normalized structure of the ANN
@return: a tuple of (# of inputs, # of hidden, # of outputs)
"""
inputs = self.num_inputs
outputs = self.num_outputs
self.data_style = []
for i, var in enumerate(self.input_variables):
if var.data_type != 'Numeric':
unique_values = len(var.unique_values)
inputs += unique_values - 1
for j in range(unique_values):
self.data_style.append('cI' + str(i) + '-' + str(j))
else:
self.data_style.append('nI' + str(i))
for i, var in enumerate(self.output_variables):
if var.data_type != 'Numeric':
unique_values = len(var.unique_values)
outputs += unique_values - 1
for j in range(unique_values):
self.data_style.append('cO' + str(i) + '-' + str(j))
else:
self.data_style.append('nO' + str(i))
# Consider hidden size = 2/3 the sum of inputs and outputs
hidden = int(math.ceil((inputs + outputs) * 2 / 3))
return inputs, hidden, outputs
def get_titles(self, title_type='titles', source='inputs'):
"""
returns titles of data depending on requested parameters
@param title_type: either 'titles', or 'briefs'. The default is 'titles'
@param source: either 'inputs' to return input variables' titles or briefs,
or 'outputs' to return output variables' titles or briefs
@return: required feature as described above
"""
variables = self.input_variables if source == 'inputs' else self.output_variables
tmp = []
for var in variables:
tmp.append(var.name if title_type == 'titles' else var.brief)
return tmp
def get_mean_row(self, expanded=True, location='average', encrypted_result=True):
"""
        @param expanded: if True (Default), returns one 'c' per member of the
                            categoric variable,
                            otherwise, returns one 'c' per categoric variable
        @param location: Default is 'average' to return the line in the middle of the data
                            Other values are '1/4' and '3/4', but not yet implemented
        @param encrypted_result: NOT yet completed, leave defaults please
Default is True, to return 'c's or 'n's
@return:
"""
mean_row = []
if location == 'average':
if encrypted_result:
for var in self.input_variables:
if var.data_type != 'Numeric': # categoric, it will return only the number of categories
if not expanded:
mean_row.append('c' + str(len(var.get_basic_stats())))
else:
for i in range(len(var.get_basic_stats())):
mean_row.append('c')
else: # Numeric
mean_row.append(var.get_basic_stats()[0])
else:
for var in self.input_variables:
if var.data_type != 'Numeric':
mean_row.extend([0 for i in range(len(var.get_basic_stats()))])
elif location == '1/4':
pass
elif location == '3/4':
pass
return mean_row
def get_data_style(self, required_style='binary'):
"""
        @param required_style: Default is 'binary',
                which returns a list of boolean values
                (True = Numeric, False = Categoric).
                'vars' returns two lists of variable indices,
                one for the inputs and one for the outputs.
        @return: the list described above
"""
temp = None
if required_style == 'binary':
temp = []
for var in self.data_style:
if var[0] == 'c':
temp.append(False)
else:
temp.append(True)
elif required_style == 'vars':
temp = [[], []]
for var in self.data_style:
if var[1] == 'I':
temp[0].append(int(var[2]))
else:
temp[1].append(int(var[2]))
return temp
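# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): get_normalized_structure()
# above widens every categoric variable to one node per unique category
# (one-of-N encoding) and then sizes the hidden layer as roughly 2/3 of the
# total node count.  The standalone helper below reproduces just that sizing
# rule on hypothetical variable descriptions, so the heuristic can be checked
# without building a full Data object.
import math

def sketch_normalized_structure(input_vars, output_vars):
    """input_vars / output_vars: lists whose items are either the string
    'numeric' or an int giving the category count of a categoric variable
    (a hypothetical shorthand, not the module's own data format)."""
    def width(var):
        return 1 if var == 'numeric' else int(var)
    inputs = sum(width(v) for v in input_vars)
    outputs = sum(width(v) for v in output_vars)
    # same expression as in get_normalized_structure() above
    hidden = int(math.ceil((inputs + outputs) * 2 / 3))
    return inputs, hidden, outputs

# e.g. two numeric inputs plus one 3-category input and one numeric output:
# sketch_normalized_structure(['numeric', 'numeric', 3], ['numeric']) -> (5, 4, 1)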
class Variable:
"""
A new variable, to define data_type, min, max, etc...
"""
def __init__(self, value_list, caption, data_type='Numeric', brief='Var'):
"""
@param value_list: the list of values of this variable
@param caption: the caption/ title of the variable
@param data_type: its data type (Numeric or Categoric)
@param brief: Its brief title (for small plots)
"""
def average(s):
"""
Calculates the average of a list
@param s: A list of values
@return: its average
"""
return sum(s) * 1.0 / len(s)
self.name = caption
self.brief = brief
self.data_type = data_type
self.values = value_list
if self.data_type == 'Numeric':
self.min = min(value_list)
self.max = max(value_list)
self.count = len(value_list)
self.avg = average(value_list)
self.var = average(map(lambda x: (x - self.avg) ** 2, self.values))
self.stdev = math.sqrt(self.var)
if self.min == self.max: # The variable is single-valued variable and should be removed
self.is_valid = False
print "The variable of name '" + caption + "' is a single valued variable! it will not be " \
"considered in the analysis."
else:
self.is_valid = True
else:
# self.unique_values = sorted(list(set(value_list)))
# collections.Counter([i-i%3+3 for i in self.values])
value_list_ok = [i for i in value_list]
self.frequency = collections.Counter(value_list_ok)
# collections.Counter([i-i%3+3 for i in self.values])
self.members = [] # will be filled after normalization(similar to unique values but sort descending)
self.normalized_lists = [] # will be filled after normalization
self.members_indices = {}
self.do_one_of_many_normalization()
# change the unique_values list to be like the members list
self.unique_values = self.members
self.num_categories = len(self.unique_values)
# print self.get_de_normalized_value([.4, .8, .1, .0, .7, .2])
if self.num_categories == 1:
self.is_valid = False
print "The variable of name '" + caption + "' is a single valued variable! it will not be " \
"considered in the analysis."
else:
self.is_valid = True
pass
def __str__(self):
# print 'Variable: ', self.name
"""
        Builds a printable summary of the variable's basic information
        @return: the formatted summary string
"""
string = ''
if self.data_type != 'Numeric': # Categoric data types
labels = ['Variable', 'Brief name', 'Data type', 'Values', 'Num. of categories', 'Frequencies']
l = max(map(lambda x: len(x), labels)) + 1
values = [self.name, self.brief, self.data_type, self.unique_values, self.num_categories,
dict(self.frequency)]
for i, label in enumerate(labels):
string += '{:<{}s}'.format(label, l) + ': ' + str(values[i]) + '\n'
else:
            labels =
import math, torch
import numpy as np
from numpy.random import normal as normrnd
from scipy.stats import multivariate_normal, norm
from scipy.linalg import sqrtm, expm
from pdb import set_trace as bp
from include.DNN import DNN
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from include.dataStructures.particle import Particle
class localize:
def __init__(self, numP, su, sz, distMap, mat, wayPts, R, dim, useClas, hardClas, modelpath="./models/best.pth"):
self.np = numP
self.sz = sz
self.dists = distMap
self.dim = dim
self.wayPts = wayPts
self.pts = self.convert(wayPts)
self.nAP = mat.numAPs
self.tx = mat.Tx
self.R = R
self.start = self.wayPts[0]
self.su = su
self.path = []
self.APLocs = []
self.IDs = []
self.use = useClas
self.hard = hardClas
self.modelpath = modelpath
self.model = None
self.confidence = [0, 0, 0, 0] # true positive, false positive, true negative, false negative
if self.dim == 2: self.su = su[0:2]
if self.use: self.load_model()
def print(self, samples):
for i in range(self.np):
print("pose: ", samples[i].pose, " | weight: ", samples[i].w)
def distance(self, x, y):
if len(x)==3 and len(y)==3:
return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 + (x[2]-y[2])**2 )
else:
return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 )
def MSE(self):
mse = 0
for i in range(len(self.pts)):
mse += self.distance(self.wayPts[i], self.path[i])
mse = mse/len(self.pts)
return mse
def getCDF(self):
cdf = [0 for x in range(len(self.pts))]
for i in range(len(self.pts)):
cdf[i] = self.distance(self.wayPts[i], self.path[i])
return cdf
def distrib(self):
start = self.wayPts[0] ; samples = []
if self.dim == 2: start = [start[0], start[1]]
if self.dim == 3: start = start
for _ in range(self.np):
samples.append(Particle(start, 1/self.np))
return samples
def convert(self, pts):
n = len(pts)
rtPts = []
for i in range(1, n):
dx = pts[i][0] - pts[i-1][0]
dy = pts[i][1] - pts[i-1][1]
if self.dim==2: rtPts.append([dx, dy])
if self.dim==3: dz = pts[i][2] - pts[i-1][2] ; rtPts.append([dx, dy, dz])
return rtPts
'''
load pytorch model and save dict
'''
def load_model(self):
model = DNN()
path = self.modelpath
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['state_dict'])
self.model = model
self.model.eval()
'''
classify into LOS/NLOS
'''
def classify(self, rssi, euc):
inp = torch.tensor([rssi, euc])
out = self.model(inp.float())
pred = 1 if (out[1]>out[0]) else 0
return pred
'''
weighting using the normpdf subroutine
'''
def getWeight(self, dz):
norpdf = 1
for i in range(len(dz)):
if dz[i]!=0:
norpdf *= norm.pdf(dz[i], 0, self.sz[i])
return norpdf
'''
weighting using the mvnpdf subroutine
'''
def getMultiWeight(self, dz):
idx = [i for i, e in enumerate(dz) if e != 0]
val = [] ; sig = []
if len(idx)==0:
return 1/self.np
for i in idx:
val.append(dz[i])
sig.append(self.sz[i])
mvn = multivariate_normal([0]*len(idx), np.diag(sig))
return mvn.pdf(val)
'''
    no return value is needed: the samples list is
    mutated in place, since Python passes object
    references rather than copies
'''
def motion_model(self, samples, point, su):
for i in range(self.np):
dx = point[0] - normrnd(0, su[0])
dy = point[1] - normrnd(0, su[1])
if self.dim == 2: pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy]
if self.dim == 3: dz = point[2] - normrnd(0, su[2])
if self.dim == 3: pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy, samples[i].pose[2] + dz]
samples[i].pose = pose
'''
measurement model for the particle filter
label for dMap = 1 : NLOS , 0 : LOS
'''
def measure_model(self, samples, z):
totalWt = 0 ; nAP = len(z)
for i in range(self.np):
dz = [0 for x in range(nAP)]
for j in range(nAP):
tx = self.tx[j] ; pos = samples[i].pose
d = self.distance(tx, pos)
if d <= self.R:
if self.use:
if self.hard:
label = self.classify(z[j].rssi, d)
# confidence matrix calculation
if label==0 and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                            elif label==0 and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
elif label==1 and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
                            elif label==1 and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false negative
if label==0:
dz[j] = abs(z[j].rssi-d)
else:
inp = torch.tensor([z[j].rssi, d])
out = self.model(inp.float()).detach().numpy()
dz[j] = out[0]*abs(z[j].rssi-d) + out[1]*abs(z[j].rssi - normrnd(self.R,3))
# confidence matrix calculation
if out[0]>out[1] and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
elif out[0]>out[1] and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
elif out[0]<out[1] and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
elif out[0]<out[1] and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false negative
else:
dz[j] = abs(z[j].rssi-d)
wt = self.getWeight(dz)
samples[i].w *= wt
totalWt += wt
if totalWt!=0:
for i in range(self.np):
samples[i].w = samples[i].w / totalWt
else:
for i in range(self.np):
samples[i].w = 1/self.np
'''
measurement model for fast slam v1
label for dMap = 1 : NLOS , 0 : LOS
'''
def fast_measure_model(self, samples, z):
if self.dim == 2: Qt = np.diag([10,10])
if self.dim == 3: Qt = np.diag([10,10,10])
Qt = Qt.tolist() ; nAP = len(z) ; totWt = 0
for i in range(self.np):
for j in range(nAP):
tx = np.array(self.tx[j]) ; pos = np.array(samples[i].pose)
d = self.distance(tx, pos)
if d <= self.R:
# initialize particle map
if j not in samples[i].mapID:
samples[i].mapMu.append(tx)
samples[i].mapSigma.append(Qt)
samples[i].mapID.append(j)
samples[i].hashMap[j] = len(samples[i].mapID) - 1
samples[i].w = 1/self.np
# update particle map
else:
ID = samples[i].hashMap[j]
# prediction step
muHat = samples[i].mapMu[ID]
sigHat = np.array(samples[i].mapSigma[ID])
# update step
dHat = self.distance(pos, muHat)
# use classifier or not
if self.use:
if self.hard:
label = self.classify(z[j].rssi, dHat)
# confidence matrix calculation
if label==0 and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                                elif label==0 and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
elif label==1 and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
elif label==1 and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false positive
if label==0:
innov = abs(z[j].rssi-dHat)
else:
continue
else:
inp = torch.tensor([z[j].rssi, dHat])
out = self.model(inp.float()).detach().numpy()
innov = out[0]*abs(z[j].rssi - dHat) + out[1]*abs(z[j].rssi - normrnd(self.R,3))
# confidence matrix calculation
if out[0]>out[1] and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                                elif out[0]>out[1] and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
elif out[0]<out[1] and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
                                elif out[0]<out[1] and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false negative
else:
innov = abs(z[j].rssi - dHat)
dx = muHat[0] - pos[0] ; dy = muHat[1] - pos[1]
den = math.sqrt(dx**2 + dy**2)
H = np.array([dx/den, dy/den])
if self.dim==3:
dz = muHat[2] - pos[2]
den = math.sqrt(dx**2 + dy**2 + dz**2)
H = np.array([dx/den, dy/den, dz/den])
try:
Q = np.matmul(np.matmul(H, sigHat), H) + self.sz[j]
except:
bp()
# Kalman Gain
K = np.matmul(sigHat, H)/Q
# update pose/ covar
mu = muHat + innov*K
K = K.reshape((self.dim,1))
sig = (np.identity(self.dim) - K*H)*sigHat
samples[i].mapMu[ID] = mu.reshape((self.dim,))
samples[i].mapSigma[ID] = sig.tolist()
samples[i].w = max(samples[i].w, math.sqrt(2*math.pi*Q)*math.exp(-0.5*(innov**2)/Q))
totWt += samples[i].w
# normalize the weights
if totWt==0:
for i in range(self.np):
samples[i].w = 1/self.np
else:
for i in range(self.np):
samples[i].w = samples[i].w/totWt
'''
resampling algorithm applicable to both
particle filter and fast slam because of
common structure of particle
'''
def resample(self, samples):
idx = [0]*self.np ; Q = [0]*self.np ; Q[0] = samples[0].w
for i in range(1, self.np):
Q[i] = samples[i].w + Q[i-1]
t = np.random.rand(self.np+1, 1)
T = np.sort(t, axis=0)
T[self.np] = 1 ; i,j = 0,0
while i<self.np and j<self.np:
if T[i] < Q[j]:
idx[i] = j
i += 1
else:
j += 1
if len(set(idx))>0.2*self.np:
for i in range(self.np):
samples[i].pose = samples[idx[i]].pose
samples[i].w = 1/self.np
samples[i].mapMu = samples[idx[i]].mapMu
samples[i].mapID = samples[idx[i]].mapID
samples[i].mapSigma = samples[idx[i]].mapSigma
samples[i].hashMap = samples[idx[i]].hashMap
'''
Calculates the effective number of particles
in the sampled distribution.
'''
def neff(self, samples):
wghts = [0]*self.np ; totWt = 0
for i in range(self.np):
wghts[i] = samples[i].w
totWt += samples[i].w
den = 0
for i in range(self.np):
wghts[i] = (wghts[i]/totWt)**2
den += wghts[i]
return 1/den
'''
Calculates weighted mean and variance of the
sample distribution
'''
def meanVar(self, samples):
totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
for i in range(self.np):
mu[0] += samples[i].pose[0]
mu[1] += samples[i].pose[1]
if self.dim==3: mu[2] += samples[i].pose[2]
totWt += samples[i].w
if self.dim==2: mu = [mu[0]/self.np, mu[1]/self.np]
if self.dim==3: mu = [mu[0]/self.np, mu[1]/self.np, mu[2]/self.np]
for i in range(self.np):
if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
sig += np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
sig = sig/self.np
return mu, sig
'''
Calculates weighted mean and variance of the
sample distribution
'''
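# ----------------------------------------------------------------------------
# Illustrative sketch (separate from the localize class above): neff() computes
# the effective sample size 1 / sum(w_i ** 2) on normalized weights, and
# resample() draws new particle indices from the cumulative weight distribution.
# The standalone functions below reproduce those two ideas on a plain list of
# weights, independently of the Particle data structure.
import numpy as np

def effective_sample_size(weights):
    w = np.asarray(weights, dtype=float)
    w = w / w.sum()                      # normalize, as neff() does internally
    return 1.0 / np.sum(w ** 2)

def resample_indices(weights, rng=np.random):
    w = np.asarray(weights, dtype=float)
    w = w / w.sum()
    cumulative = np.cumsum(w)            # same role as Q[] in resample()
    draws = np.sort(rng.rand(len(w)))    # sorted uniform draws, like T[] above
    return np.searchsorted(cumulative, draws)

# e.g. effective_sample_size([0.7, 0.1, 0.1, 0.1]) is about 1.9, i.e. most of
# the probability mass sits on one particle, so resampling is worthwhile.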
W = int(line[0])
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
return Ln
def func_274c29e3e1244da3b1b5d8eb82909935(input):
line = input.readline().split()
W = int(line[0])
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
return i
def func_8b249f43a02344e3937a53df149f940e(input):
W = int(line[0])
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
return i
def func_91cf33ea3dcc4f24b8b9406a5f2101b1(input):
W = int(line[0])
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
return line
def func_73e3bbc45d89414e8d03cbb496a9cea1(input):
W = int(line[0])
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
return y
def func_961541194efa4e82b4401054eb12f225(input):
W = int(line[0])
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
return x
def func_cae936e9e42e426eb0c0fc38c2e313d9(input):
W = int(line[0])
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
return Ln
def func_dd086ff8e08a4c7fbdc7c5716a167710(input):
W = int(line[0])
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
return G
def func_1034ce88e7a746db9ee2f27bc9b38cb6(input):
W = int(line[0])
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
return W
def func_2eb22b3eaa944ea697f9ab683a313a9b(input):
W = int(line[0])
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
return L
def func_c0094d0f670d42b38962d9e06ab25adc(input):
W = int(line[0])
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
return U
def func_fef22b20348a4fb7b37e0cbb71e50707(input):
W = int(line[0])
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
return Un
def func_4f612b42db234ed498b71b4f3791c68c(input):
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
return x
def func_00b9dc9bd80345eea12b41e1a103ae5c(input):
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
return Un
def func_6d2fd5cfa7be471d8b68d3c15333c5b7(input):
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
return i
def func_bf383be0c4154071965497e0ca0e6452(input):
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
return y
def func_bbc2b84f4c8e42a4bc1df5ae3f7c4a81(input):
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
return G
def func_72e21e059ad249a5a4629442c6e41cb0(input):
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
return line
def func_e895bc49a26943d5a0d8f717bbab0de4(input):
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
return U
def func_cc04bbd70fa6463783f918db4859e1ba(input):
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
return L
def func_abed7c8e17804f16b45d3b10e4191f6d(input):
Ln = int(line[1])
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
return Ln
def func_7d38160a11d14357bbfeeba721ee1e02(W, Ln, input):
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
res = solve(W, L, U, G)
return i
def func_81563792259d46ebb3f1e683c28e3198(W, Ln, input):
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
res = solve(W, L, U, G)
return line
def func_2bc4ca6dd1d844a8af484ce721c1b8fc(W, Ln, input):
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
res = solve(W, L, U, G)
return G
def func_a241ca65bec8415ba481bb4483510f98(W, Ln, input):
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
res = solve(W, L, U, G)
return res
def func_0a950d3e16694eb3ad9dc1d5f9833345(W, Ln, input):
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
res = solve(W, L, U, G)
return y
def func_74e6642c1cb84eccb5bfcfce673cae00(W, Ln, input):
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
res = solve(W, L, U, G)
return Un
def func_993cbab5a8ba45a0a16db87f66306f82(W, Ln, input):
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
res = solve(W, L, U, G)
return x
def func_896778f8096a4cb7bef6ab941e445e00(W, Ln, input):
Un = int(line[2])
G = int(line[3])
L = []
for i in range(Ln):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
L.append((x, y))
U = []
for i in range(Un):
line = input.readline().split()
x = int(line[0])
y = int(line[1])
U.append((x, y))
    res = solve(W, L,
are read from image file
elif isinstance(Data_for_localMaxima, str) or Data_for_localMaxima is None:
Data, framedim, fliprot = IOimage.readCCDimage(filename,
stackimageindex=stackimageindex,
CCDLabel=CCDLabel,
dirname=None,
verbose=verbose)
# if verbose: print("image from filename {} read!".format(filename))
# peak search in a single and particular region of image
if center is not None:
framediminv = (framedim[1], framedim[0])
imin, imax, jmin, jmax = ImProc.getindices2cropArray(center, boxsizeROI, framediminv)
Data = Data[jmin:jmax, imin:imax]
if write_execution_time:
dtread = ttt.time() - t0
ttread = ttt.time()
# if verbose: print("Read Image. Execution time : {:.3f} seconds".format(dtread))
if isinstance(Data_for_localMaxima, str):
# if verbose: print("Using Data_for_localMaxima for local maxima search: --->", Data_for_localMaxima)
# compute and remove background from this image
if Data_for_localMaxima == "auto_background":
# if verbose:
# print("computing background from current image ", filename)
backgroundimage = ImProc.compute_autobackground_image(Data, boxsizefilter=10)
            # basic subtraction
usemask = True
# path to a background image file
else:
if stackimageindex == -1:
raise ValueError("Use stacked images as background is not implement")
path_to_bkgfile = Data_for_localMaxima
if verbose: print("Using image file {} as background".format(path_to_bkgfile))
try:
backgroundimage, _, _ = IOimage.readCCDimage(path_to_bkgfile,
CCDLabel=CCDLabel)
except IOError:
raise ValueError("{} does not seem to be a path file ".format(path_to_bkgfile))
usemask = False
# if verbose: print("Removing background for local maxima search")
Data = ImProc.computefilteredimage(Data, backgroundimage, CCDLabel, usemask=usemask,
formulaexpression=formulaexpression)
# if verbose > 1: print("Data.shape for local maxima", Data.shape)
# --- PRE SELECTION OF HOT PIXELS as STARTING POINTS FOR FITTING ---------
# first method ---------- "Basic Intensity Threshold"
if local_maxima_search_method in (0, "0"):
# if verbose: print("Using simple intensity thresholding to detect local maxima (method 1/3)")
res = ImProc.LocalMaxima_from_thresholdarray(Data, IntensityThreshold=IntensityThreshold,
rois=listrois,
framedim=framedim,
outputIpixmax=outputIpixmax)
if res is not None:
if outputIpixmax:
peaklist, Ipixmax = res
ComputeIpixmax = True
else:
peaklist = res
Ipixmax = np.ones(len(peaklist)) * IntensityThreshold
ComputeIpixmax = False
# second method ----------- "Local Maxima in a box by shift array method"
if local_maxima_search_method in (1, "1"):
# flat top peaks (e.g. saturation) are NOT well detected
# if verbose: print("Using shift arrays to detect local maxima (method 2/3)")
peaklist, Ipixmax = ImProc.LocalMaxima_ShiftArrays(Data,
framedim=framedim,
IntensityThreshold=IntensityThreshold,
Saturation_value=Saturation_value_flatpeak,
boxsize_for_probing_minimal_value_background=boxsize, # 30
pixeldistance_remove_duplicates=PixelNearRadius, # 25
nb_of_shift=boxsize) # 25
ComputeIpixmax = True
# third method: ------------ "Convolution by a gaussian kernel"
if local_maxima_search_method in (2, "2"):
# if verbose: print("Using mexican hat convolution to detect local maxima (method 3/3)")
peakValConvolve, boxsizeConvolve, central_radiusConvolve = paramsHat
Candidates = ImProc.LocalMaxima_KernelConvolution(Data,
framedim=framedim,
peakValConvolve=peakValConvolve,
boxsizeConvolve=boxsizeConvolve,
central_radiusConvolve=central_radiusConvolve,
thresholdConvolve=thresholdConvolve, # 600 for CdTe
connectivity=1,
IntensityThreshold=IntensityThreshold,
boxsize_for_probing_minimal_value_background=PixelNearRadius,
return_nb_raw_blobs=return_nb_raw_blobs,
peakposition_definition=peakposition_definition)
if Candidates is None:
# if verbose: print("No local maxima found, change peak search parameters !!!")
return None
if return_nb_raw_blobs == 1:
peaklist, Ipixmax, nbrawblobs = Candidates
else:
peaklist, Ipixmax = Candidates
# print "len(peaklist)", peaklist.shape
# print "Ipixmax", Ipixmax.shape
ComputeIpixmax = True
# print "Ipixmax after convolution method", Ipixmax
# -------------------------------------------------------------
# --- END of blobs search methods calls
    if peaklist is None or len(peaklist) == 0:
# if verbose: print("No local maxima found, change peak search parameters !!!")
return None
# pixel origin correction due to ROI croping
if center is not None:
        x1, y1 = center # TODO: to be checked !!
peaklist = peaklist + np.array([x1, y1])
if write_execution_time:
dtsearch = ttt.time() - float(ttread)
# if verbose: print("Local maxima search. Execution time : {:.3f} seconds".format(dtsearch))
# removing some duplicates ------------
if len(peaklist) >= 2:
nb_peaks_before = len(peaklist)
# print "%d peaks in peaklist before purge" % nb_peaks_before
# print 'peaklist',in peaklist before purge
if len(peaklist) >= NumberMaxofFits:
if verbose:
print("TOO MUCH peaks to handle.")
print("(in PeakSearch) It may stuck the computer.")
print("Try to reduce the number of Local Maxima or\n reduce "
"NumberMaxofFits in PeakSearch()")
return None
Xpeaklist, Ypeaklist, tokeep = GT.removeClosePoints(peaklist[:, 0], peaklist[:, 1],
dist_tolerance=2)
peaklist = np.array([Xpeaklist, Ypeaklist]).T
Ipixmax = np.take(Ipixmax, tokeep)
if verbose: print("Keep {} from {} initial peaks (ready for peak positions and shape fitting)".format(
len(peaklist), nb_peaks_before))
# -----------------------------------------------
#-------------------------------------------------
# remove black listed peaks option
# and update peaklist
if Remove_BlackListedPeaks_fromfile is not None and len(peaklist)>1:
if not isinstance(Remove_BlackListedPeaks_fromfile, str):
# array of XY shape = (n,2)
XY_blacklisted = Remove_BlackListedPeaks_fromfile
elif Remove_BlackListedPeaks_fromfile.endswith(('.dat', '.fit')):
XY_blacklisted = Get_blacklisted_spots(Remove_BlackListedPeaks_fromfile)
if XY_blacklisted is None: print('No or only 1 Blacklisted spots found...')
else: #
X, Y = peaklist[:, :2].T
(peakX, _, tokeep) = GT.removeClosePoints_two_sets([X, Y], XY_blacklisted,
dist_tolerance=maxPixelDistanceRejection,
verbose=0)
npeak_before = len(X)
npeak_after = len(peakX)
if verbose: print("\n Removed {} (over {}) peaks belonging to the blacklist {}\n".format(
npeak_before - npeak_after,
npeak_before,
Remove_BlackListedPeaks_fromfile))
peaklist = np.take(peaklist, tokeep, axis=0)
Ipixmax = Ipixmax[tokeep]
#-------------------------------------------------
# ---- ----------- no FITTING ----------------------------
# NO FIT and return raw list of local maxima
if fit_peaks_gaussian == 0:
if position_definition == 1: # XMAS like offset
peaklist[:, :2] = peaklist[:, :2] + np.array([1, 1])
if position_definition == 2: # fit2D offset
peaklist[:, 0] = peaklist[:, 0] + 0.5
peaklist[:, 1] = framedim[0] - peaklist[:, 1] + 0.5
if verbose:
print("{} local maxima found".format(len(peaklist)))
print("20 first peaks", peaklist[:20])
# tabpeak mimics the array built after fitting procedures
tabpeak = np.zeros((len(peaklist[:, 0]), 10))
tabpeak[:, 0] = peaklist[:, 0]
tabpeak[:, 1] = peaklist[:, 1]
tabpeak[:, 2] = Ipixmax
# return tabpeak, peaklist, peaklist, peaklist # no fitdata return raw list of local maxima
lastelem = peaklist
if return_nb_raw_blobs == 1:
lastelem = nbrawblobs
return tabpeak, peaklist, peaklist, lastelem
    if peaklist is None or len(peaklist) == 0:
# print("No local maxima found, no peaks to fit !!!")
return None
# ---- ---------------FITTING ----------------------------
# gaussian fitdata
elif fit_peaks_gaussian == 1:
type_of_function = "gaussian"
# lorentzian fitdata
elif fit_peaks_gaussian == 2:
type_of_function = "lorentzian"
else:
raise ValueError("optional fit_peaks_gaussian value is not understood! Must be 0,1 or 2")
if verbose:
print("\n*****************")
print("{} local maxima found".format(len(peaklist)))
print("\n Fitting of each local maxima\n")
if center is not None:
position_start = "centers"
else:
position_start = "max"
# if Data_for_localMaxima will be used for refining peak positions
if Fit_with_Data_for_localMaxima:
Data_to_Fit = (Data, framedim, fliprot)
else:
Data_to_Fit = None
return fitoneimage_manypeaks(filename,
peaklist,
boxsize,
stackimageindex,
CCDLabel=CCDLabel,
dirname=None,
position_start=position_start,
type_of_function=type_of_function,
xtol=xtol,
FitPixelDev=FitPixelDev,
Ipixmax=Ipixmax,
MaxIntensity=Saturation_value,
MinIntensity=MinIntensity,
PeakSizeRange=PeakSizeRange,
verbose=verbose,
position_definition=position_definition,
NumberMaxofFits=NumberMaxofFits,
ComputeIpixmax=ComputeIpixmax,
use_data_corrected=Data_to_Fit,
reject_negative_baseline=reject_negative_baseline)
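# ----------------------------------------------------------------------------
# Illustrative sketch (not the ImProc implementation used above): the first
# local-maxima option of PeakSearch() keeps pixels brighter than
# IntensityThreshold as starting points for fitting.  A common way to build a
# comparable candidate list with numpy/scipy is to keep pixels that are both
# above the threshold and equal to the maximum of their neighbourhood.
import numpy as np
from scipy import ndimage

def sketch_local_maxima(data, intensity_threshold, boxsize=5):
    """Return an (n, 2) array of candidate peak positions, here expressed as
    (column, row) pixel indices."""
    is_local_max = ndimage.maximum_filter(data, size=boxsize) == data
    candidates = is_local_max & (data > intensity_threshold)
    rows, cols = np.nonzero(candidates)
    return np.column_stack((cols, rows))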
def Get_blacklisted_spots(filename):
XY_blacklisted = None
if filename.endswith('.dat'):
data_peak_blacklisted = IOLT.read_Peaklist(filename, dirname=None)
if len(data_peak_blacklisted) > 1:
XY_blacklisted = data_peak_blacklisted[:, :2].T
elif filename.endswith('.fit'):
resdata = IOLT.readfile_fit(filename)
if resdata is not None:
allgrainsspotsdata = resdata[4]
nspots = len(allgrainsspotsdata)
else:
nspots = 0
if nspots > 1:
XY_blacklisted = allgrainsspotsdata[:, 7:9].T
return XY_blacklisted
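# ----------------------------------------------------------------------------
# Illustrative sketch (not the GT.removeClosePoints_two_sets implementation):
# PeakSearch() above drops every detected peak that lies within
# maxPixelDistanceRejection pixels of a blacklisted spot.  The same filtering
# can be written directly with numpy broadcasting.
import numpy as np

def sketch_remove_blacklisted(peaks_xy, blacklist_xy, dist_tolerance):
    """peaks_xy: (n, 2) array of detected peaks; blacklist_xy: (2, m) array as
    returned by Get_blacklisted_spots().  Returns indices of peaks to keep."""
    peaks = np.asarray(peaks_xy, dtype=float)
    black = np.asarray(blacklist_xy, dtype=float).T          # -> (m, 2)
    # pairwise distances between every peak and every blacklisted spot
    d = np.linalg.norm(peaks[:, None, :] - black[None, :, :], axis=2)
    return np.where(np.all(d > dist_tolerance, axis=1))[0]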
def peaksearch_on_Image(filename_in, pspfile, background_flag="no", blacklistpeaklist=None,
dictPeakSearch={},
CCDLabel="MARCCD165",
outputfilename=None,
psdict_Convolve=PEAKSEARCHDICT_Convolve,
verbose=0):
r"""
Perform a peaksearch by using .psp file
# still not very used and checked?
# missing dictPeakSearch as function argument for formulaexpression or dict_param??
"""
dict_param = readPeakSearchConfigFile(pspfile)
Data_for_localMaxima, formulaexpression = read_background_flag(background_flag)
blacklistedpeaks_file = set_blacklist_filepath(blacklistpeaklist)
dict_param["Data_for_localMaxima"] = Data_for_localMaxima
dict_param["formulaexpression"] = formulaexpression
dict_param["Remove_BlackListedPeaks_fromfile"] = blacklistedpeaks_file
# create a data considered as background from an imagefile
BackgroundImageCreated = False
flag_for_backgroundremoval = dict_param["Data_for_localMaxima"]
# flag_for_backgroundremoval is a file path to an imagefile
# create background data: dataimage_bkg
if flag_for_backgroundremoval not in ("auto_background", None) and not isinstance(
flag_for_backgroundremoval, np.ndarray):
fullpath_backgroundimage = psdict_Convolve["Data_for_localMaxima"]
# print "fullpath_backgroundimage ", fullpath_backgroundimage
# dirname_bkg, imagefilename_bkg = os.path.split(fullpath_backgroundimage)
# CCDlabel_bkg = CCDLabel
BackgroundImageCreated = True
if verbose: print("consider dataimagefile {} as background".format(fullpath_backgroundimage))
if "formulaexpression" in dictPeakSearch:
formulaexpression = dictPeakSearch["formulaexpression"]
else:
raise ValueError('Missing "formulaexpression" to operate on images before peaksearch in ' 'peaksearch_fileseries()')
# saturationlevel = DictLT.dict_CCD[CCDLabel][2]
# dataimage_corrected = applyformula_on_images(dataimage_raw,
# dataimage_bkg,
# formulaexpression=formulaexpression,
# SaturationLevel=saturationlevel,
# clipintensities=True)
if verbose: print("using {} in peaksearch_fileseries".format(formulaexpression))
# for finding local maxima in image from formula
psdict_Convolve["Data_for_localMaxima"] = fullpath_backgroundimage
# for fitting peaks in image from formula
psdict_Convolve["reject_negative_baseline"] = False
psdict_Convolve["formulaexpression"] = formulaexpression
psdict_Convolve["Fit_with_Data_for_localMaxima"] = True
Res = PeakSearch(filename_in,
CCDLabel=CCDLabel,
Saturation_value=DictLT.dict_CCD[CCDLabel][2],
Saturation_value_flatpeak=DictLT.dict_CCD[CCDLabel][2],
**psdict_Convolve)
if Res in (False, None):
print("No peak found for image file: ", filename_in)
return None
# write file with comments
Isorted, _, _ = Res[:3]
if outputfilename:
params_comments = "Peak Search and Fit parameters\n"
params_comments += "# {}: {}\n".format("CCDLabel", CCDLabel)
for key, val in list(psdict_Convolve.items()):
            if not BackgroundImageCreated or
and corresponding Cell instances as values.
The Cell instance associated with a given key is used as a template
for the other cells of its type in the population.
gid_ranges : dict
A dictionary of unique identifiers of each real and artificial cell
in the network. Every cell type is represented by a key read from
cell_types, followed by keys read from external_drives. The value
of each key is a range of ints, one for each cell in given category.
Examples: 'L2_basket': range(0, 270), 'evdist1': range(272, 542), etc
pos_dict : dict
Dictionary containing the coordinate positions of all cells.
Keys are 'L2_pyramidal', 'L5_pyramidal', 'L2_basket', 'L5_basket',
or any external drive name
cell_response : CellResponse
An instance of the CellResponse object.
external_drives : dict (keys: drive names) of dict (keys: parameters)
The external driving inputs to the network. Drives are added by
defining their spike-time dynamics, and their connectivity to the real
cells of the network. Event times are instantiated before simulation,
and are stored under the ``'events'``-key (list of list; first
index for trials, second for event time lists for each drive cell).
external_biases : dict of dict (bias parameters for each cell type)
The parameters of bias inputs to cell somata, e.g., tonic current clamp
connectivity : list of dict
List of dictionaries specifying each cell-cell and drive-cell
connection
rec_arrays : dict
Stores electrode position information and voltages recorded by them
for extracellular potential measurements. Multiple electrode arrays
may be defined as unique keys. The values of the dictionary are
instances of :class:`hnn_core.extracellular.ExtracellularArray`.
threshold : float
Firing threshold of all cells.
delay : float
Synaptic delay in ms.
Notes
    -----
    `net = jones_2009_model(params)` is the recommended path for creating a
network. Instantiating the network as `net = Network(params)` will
produce a network with no cell-to-cell connections. As such,
connectivity information contained in `params` will be ignored.
"""
def __init__(self, params, add_drives_from_params=False, legacy_mode=True):
# Save the parameters used to create the Network
_validate_type(params, dict, 'params')
self._params = params
# Initialise a dictionary of cell ID's, which get used when the
# network is constructed ('built') in NetworkBuilder
# We want it to remain in each Network object, so that the user can
# interrogate a built and simulated net. In addition, CellResponse is
# attached to a Network during simulation---Network is the natural
# place to keep this information
self.gid_ranges = dict()
self._n_gids = 0 # utility: keep track of last GID
# XXX this can be removed once tests are made independent of HNN GUI
# creates nc_dict-entries for ALL cell types
self._legacy_mode = legacy_mode
# Source dict of names, first real ones only!
cell_types = {
'L2_basket': basket(cell_name=_short_name('L2_basket')),
'L2_pyramidal': pyramidal(cell_name=_short_name('L2_pyramidal')),
'L5_basket': basket(cell_name=_short_name('L5_basket')),
'L5_pyramidal': pyramidal(cell_name=_short_name('L5_pyramidal'))
}
self.cell_response = None
# external drives and biases
self.external_drives = dict()
self.external_biases = dict()
# network connectivity
self.connectivity = list()
self.threshold = self._params['threshold']
self.delay = 1.0
# extracellular recordings (if applicable)
self.rec_arrays = dict()
# contents of pos_dict determines all downstream inferences of
# cell counts, real and artificial
self._n_cells = 0 # currently only used for tests
self.pos_dict = dict()
self.cell_types = dict()
self._N_pyr_x = self._params['N_pyr_x']
self._N_pyr_y = self._params['N_pyr_y']
self._inplane_distance = 1.0 # XXX hard-coded default
self._layer_separation = 1307.4 # XXX hard-coded default
self.set_cell_positions(inplane_distance=self._inplane_distance,
layer_separation=self._layer_separation)
for cell_name in cell_types:
self._add_cell_type(cell_name, self.pos_dict[cell_name],
cell_template=cell_types[cell_name])
if add_drives_from_params:
_add_drives_from_params(self)
def __repr__(self):
class_name = self.__class__.__name__
s = ("%d x %d Pyramidal cells (L2, L5)"
% (self._N_pyr_x, self._N_pyr_y))
s += ("\n%d L2 basket cells\n%d L5 basket cells"
% (len(self.pos_dict['L2_basket']),
len(self.pos_dict['L5_basket'])))
return '<%s | %s>' % (class_name, s)
def set_cell_positions(self, *, inplane_distance=None,
layer_separation=None):
"""Set relative positions of cells arranged in a square grid
Note that it is possible to change only a subset of the parameters
(the default value of each is None, which implies no change).
Parameters
----------
inplane_distance : float
            The in-plane distance (in um) between pyramidal cell somas in the
square grid. Note that this parameter does not affect the amplitude
of the dipole moment.
layer_separation : float
The separation of pyramidal cell soma layers 2/3 and 5. Note that
this parameter does not affect the amplitude of the dipole moment.
"""
if inplane_distance is None:
inplane_distance = self._inplane_distance
_validate_type(inplane_distance, (float, int), 'inplane_distance')
if not inplane_distance > 0.:
raise ValueError('In-plane distance must be positive, '
f'got: {inplane_distance}')
if layer_separation is None:
layer_separation = self._layer_separation
_validate_type(layer_separation, (float, int), 'layer_separation')
if not layer_separation > 0.:
raise ValueError('Layer separation must be positive, '
f'got: {layer_separation}')
pos = _create_cell_coords(n_pyr_x=self._N_pyr_x, n_pyr_y=self._N_pyr_y,
zdiff=layer_separation,
inplane_distance=inplane_distance)
# update positions of the real cells
for key in pos.keys():
self.pos_dict[key] = pos[key]
# update drives to be positioned at network origin
for drive_name, drive in self.external_drives.items():
pos = [self.pos_dict['origin']] * drive['n_drive_cells']
self.pos_dict[drive_name] = pos
self._inplane_distance = inplane_distance
self._layer_separation = layer_separation
def copy(self):
"""Return a copy of the Network instance
The returned copy retains the intrinsic connectivity between cells, as
well as those of any external drives or biases added to the network.
The parameters of drive dynamics are also retained, but the
instantiated ``events`` of the drives are cleared. This allows
iterating over the values defining drive dynamics, without the need to
re-define connectivity. Extracellular recording arrays are retained in
the network, but cleared of existing data.
Returns
-------
net_copy : instance of Network
A copy of the instance with previous simulation results and
``events`` of external drives removed.
"""
net_copy = deepcopy(self)
net_copy._reset_drives()
net_copy._reset_rec_arrays()
return net_copy
def add_evoked_drive(self, name, *, mu, sigma, numspikes, location,
n_drive_cells='n_cells', cell_specific=True,
weights_ampa=None, weights_nmda=None,
space_constant=3., synaptic_delays=0.1,
probability=1.0, event_seed=2, conn_seed=3):
"""Add an 'evoked' external drive to the network
Parameters
----------
name : str
Unique name for the drive
mu : float
Mean of Gaussian event time distribution
sigma : float
Standard deviation of event time distribution
numspikes : int
Number of spikes at each target cell
location : str
Target location of synapses ('distal' or 'proximal')
n_drive_cells : int | 'n_cells'
The number of drive cells that each contribute an independently
sampled synaptic spike to the network according to the Gaussian
time distribution (mu, sigma). If n_drive_cells='n_cells'
(default) and cell_specific=True, a drive cell gets assigned to
each available simulated cell in the network with 1-to-1
connectivity. Otherwise, drive cells are assigned with
all-to-all connectivity. If you wish to synchronize the timing of
this evoked drive across the network in a given trial with one
spike, set n_drive_cells=1 and cell_specific=False.
cell_specific : bool
Whether each artifical drive cell has 1-to-1 (True, default) or
all-to-all (False) connection parameters. Note that 1-to-1
connectivity requires that n_drive_cells='n_cells', where 'n_cells'
denotes the number of all available cells that this drive can
target in the network.
weights_ampa : dict or None
Synaptic weights (in uS) of AMPA receptors on each targeted cell
type (dict keys). Cell types omitted from the dict are set to zero.
weights_nmda : dict or None
Synaptic weights (in uS) of NMDA receptors on each targeted cell
type (dict keys). Cell types omitted from the dict are set to zero.
synaptic_delays : dict or float
Synaptic delay (in ms) at the column origin, dispersed laterally as
a function of the space_constant. If float, applies to all target
cell types. Use dict to create delay->cell mapping.
space_constant : float
Describes lateral dispersion (from the column origin) of synaptic
weights and delays within the simulated column. The constant is
measured in the units of ``inplane_distance`` of
:class:`~hnn_core.Network`. For example, for ``space_constant=3``,
the weights are modulated by the factor
``exp(-(x / (3 * inplane_distance)) ** 2)``, where x is the
physical distance (in um) between the connected cells in the xy
plane (delays are modulated by the inverse of this factor).
probability : dict or float (default: 1.0)
Probability of connection between any src-target pair.
Use dict to create probability->cell mapping. If float, applies to
all target cell types
event_seed : int
            Optional initial seed for random number
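# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the package itself): as described in the
# add_evoked_drive() docstring above, synaptic weights are scaled by
# exp(-(x / (space_constant * inplane_distance)) ** 2), with x the in-plane
# distance between the connected cells, and synaptic delays are scaled by the
# inverse of that factor.  The helper below evaluates that modulation so the
# effect of the constant can be inspected directly.
import math

def sketch_space_constant_modulation(x_um, inplane_distance_um,
                                     space_constant=3.0,
                                     base_weight=1.0, base_delay=0.1):
    factor = math.exp(-(x_um / (space_constant * inplane_distance_um)) ** 2)
    return base_weight * factor, base_delay / factor

# e.g. one grid spacing away with the defaults:
# sketch_space_constant_modulation(1.0, 1.0) -> (~0.895, ~0.112)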
# old/montecarlo/cfr.py
#!/usr/bin/env python3
import asyncio
import collections
import copy
import math
import numpy as np
import random
import sys
from game import Game
import model
import moves
#MCCFR with either External Sampling or Average Sampling
#TODO deal with purging data between searches in a nicer manner
#doing it in combine is too early, as combine is called before getProbs
#but doing it in search is too late, as copyFromAgent is called before search
#but we shouldn't do it in copyFromAgent as that isn't always used
#
#using isClean is dirty but it works
#sampling types
EXTERNAL = 1
AVERAGE = 2
#early state evaluation types
HEURISTIC = 1
ROLLOUT = 2
MODEL = 3
class CfrAgent:
#AS parameters:
#exploration gives all action a chance to be taken
#bonus is for early exploration
#threshold is so any action with prob > 1/threshold is always taken
#bound is the maximum number of actions that can be taken, 0 for disabled
#depth limit (if not None) replaces tree traversal with evaluation option
#evaluation: HEURISTIC is expValueHeurisitic(), rollout does a rollout, model uses an evalModel (to be implemented)
def __init__(self, teams, format,
samplingType=EXTERNAL, exploration=0, bonus=0, threshold=1, bound=0,
posReg=False, probScaling=0, regScaling=0,
depthLimit=None, evaluation=HEURISTIC, evalModel=None,
verbose=False):
        self.teams = teams
self.format = format
self.samplingType = samplingType
self.exploration = exploration
self.bonus = bonus
self.threshold = threshold
self.bound = bound
self.posReg = posReg
self.probScaling = probScaling
self.regScaling = regScaling
self.depthLimit = depthLimit
self.evaluation = evaluation
self.evalModel = evalModel
self.verbose = verbose
self.numActionsSeen = 0
self.numActionsTaken = 0
#clean means we don't have to clear our tables
self.isClean = True
self.regretTables = [{}, {}]
self.probTables = [{}, {}]
#this is an experimental feature to bootstrap data from a separate agent
#this requires that CfrAgent and the other agent use the same internal data format
def copyFromAgent(self, other):
#purge before we search, this limits the memory usage
#have to do it here as we don't want to purge data that we're
#about to copy in
self.regretTables = [{}, {}]
self.probTables = [{}, {}]
self.isClean = True
self.regretTables = other.regretTables
#we'll test copying prob tables over if regret tables work
#I'm mainly interested in boosting the quality of the off-player's strategy
#which is entirely determined by regret
#self.probTables = other.probTables
async def search(self, ps, pid=0, limit=100, seed=None, initActions=[[], []]):
#turn init actions into a useful history
history = [(None, a1, a2) for a1, a2 in zip(*initActions)]
#insert the seed in the first turn
if len(history) > 0:
_, a1, a2 = history[0]
history[0] = (seed, a1, a2)
#if we already purged for this turn, don't do it twice
#as we might have some useful data loaded in
if not self.isClean:
#purge before we search, this limits the memory usage
self.regretTables = [{}, {}]
self.probTables = [{}, {}]
self.isClean = False
#each iteration returns an expected value
#so we track this and return an average
p1ExpValueTotal = 0
p2ExpValueTotal = 0
print(end='', file=sys.stderr)
for i in range(limit):
print('\rTurn Progress: ' + str(i) + '/' + str(limit), end='', file=sys.stderr)
game = Game(ps, self.teams, format=self.format, seed=seed, verbose=self.verbose)
await game.startGame()
await game.applyHistory(history)
self.numActionsSeen = 0
self.numActionsTaken = 0
expValue = await self.cfrRecur(ps, game, seed, history, 1, i)
if i % 2 == 0:
p1ExpValueTotal += expValue
else:
p2ExpValueTotal += expValue
print(file=sys.stderr)
print('p1 exp value', 2 * p1ExpValueTotal / limit, file=sys.stderr)
print('p2 exp value', 2 * p2ExpValueTotal / limit, file=sys.stderr)
def combine(self):
#we'll do our combining and purging before we search
pass
def getProbs(self, player, state, actions):
pt = self.probTables[player]
rt = self.probTables[player]
probs = np.array([dictGet(pt, (state, a)) for a in actions])
pSum = np.sum(probs)
if pSum > 0:
return probs / np.sum(probs)
else:
return np.array([1 / len(actions) for a in actions])
#recursive implementation of cfr
#history is a list of (seed, action, action) tuples
#q is the sample probability
#assumes the game has already had the history applied
async def cfrRecur(self, ps, game, startSeed, history, q, iter, depth=0, rollout=False):
#I'm not sure about this q parameter
#I'm getting better results setting it to 1 in all games
q = 1
async def endGame():
side = 'bot1' if iter % 2 == 0 else 'bot2'
winner = await game.winner
#have to clear the results out of the queues
while not game.p1Queue.empty():
await game.p1Queue.get()
while not game.p2Queue.empty():
await game.p2Queue.get()
if winner == side:
return 1 / q
else:
return 0
cmdHeaders = ['>p1', '>p2']
queues = [game.p1Queue, game.p2Queue]
offPlayer = (iter+1) % 2
onPlayer = iter % 2
#off player
request = (await queues[offPlayer].get())
if request[0] == Game.END:
return await endGame()
req = request[1]
state = req['stateHash']
actions = moves.getMoves(self.format, req)
#just sample a move
probs = self.regretMatch(offPlayer, state, actions)
#apply exploration chance to off-player as well
exploreProbs = probs * (1 - self.exploration) + self.exploration / len(actions)
#or don't
#exploreProbs = probs
offAction = np.random.choice(actions, p=exploreProbs)
#and update average stategy
self.updateProbs(offPlayer, state, actions, probs / q, iter)
#on player
request = (await queues[onPlayer].get())
if request[0] == Game.END:
return await endGame()
req = request[1]
#now that we've checked if the game is over,
#let's check depth before continuing
if self.depthLimit != None and depth >= self.depthLimit:
if self.evaluation == HEURISTIC:
#immediately return a heuristic-based expected value
await game.cmdQueue.put('>forcewin p1')
#clean up the end game messages
await queues[onPlayer].get()
await queues[offPlayer].get()
return expValueHeuristic(onPlayer, req['state']) / q
elif self.evaluation == ROLLOUT:
#instead of branching out, find the actual value of a single
#play-through and use that as the expected value
rollout = True
#rest of rollout is implemented with the normal code path
elif self.evaluation == MODEL:
#TODO
pass
state = req['stateHash']
actions = moves.getMoves(self.format, req)
#we sometimes bias towards the first or last actions
#this fixes that bias
random.shuffle(actions)
#probs is the set of sample probabilities, used for traversing
#iterProbs is the set of probabilities for this iteration's strategy, used for regret
if rollout:
#I'm not sure if using regret matching or going uniform random
#would be better
#my gut says regret matching
probs = self.regretMatch(onPlayer, state, actions)
action = np.random.choice(actions, p=probs)
actions = [action]
probs = [1] # would it be better to use the actual probability?
iterProbs = probs
elif self.samplingType == EXTERNAL:
probs = self.regretMatch(onPlayer, state, actions)
iterProbs = probs
elif self.samplingType == AVERAGE:
#we're just using the current iteration's strategy
#it's simple and it seems to work
iterProbs = self.regretMatch(onPlayer, state, actions)
probs = iterProbs + self.exploration
#this is the average-sampling procedure from some paper
#it's designed for a large number of samples, so it doesn't really
#work. It expects it to be feasible to try every action for the
#on player on some turns, which usually isn't the case
"""
stratSum = 0
strats = []
pt = self.probTables[onPlayer]
for a in actions:
s = dictGet(pt, (state, a))
stratSum += s
strats.append(s)
probs = []
for a,s in zip(actions, strats):
if self.bonus + stratSum == 0:
p = 0
else:
p = (self.bonus + self.threshold * s) / (self.bonus + stratSum)
p = max(self.exploration, p)
probs.append(p)
"""
#keep track of how many actions we take from this state
numTaken = 0
#get expected reward for each action
rewards = []
gameUsed = False
self.numActionsSeen += len(actions)
#whether a specific action is a rollout
curRollout = rollout
for action, prob in zip(actions, probs):
#for ES we just check every action
#for AS use a roll to determine if we search
if self.samplingType == AVERAGE and not curRollout:
#instead of skipping, try making the skipped entries a rollout
#like in https://www.aaai.org/ocs/index.php/AAAI/AAAI12/paper/viewFile/4937/5469
#if we're at the last action and we haven't done anything, do something regardless of roll
if (self.bound != 0 and numTaken > self.bound) or random.random() >= prob and (action != actions[-1] or gameUsed):
curRollout = True
#rewards.append(0)
#continue
else:
curRollout = rollout
numTaken += 1
self.numActionsTaken += 1
#don't have to re-init game for the first action
if gameUsed:
game = Game(ps, self.teams, format=self.format, seed=startSeed, verbose=self.verbose)
await game.startGame()
await game.applyHistory(history)
#need to consume two requests, as we consumed two above
| |
import pickle
import os
import sys
import colorsys
import math
import copy
import tkinter as tk
from collections import namedtuple
from tkinter import filedialog, messagebox, simpledialog
from tkinter import *
from GAEV import *
cur_dir = os.getcwd()
sets_to_highlight = {} # {"blue": [gene1, gene,2 gene5], "green": [gene4, gene7, gene9], ...}
pathway_info_full = {}
pathway_info_query = {}
gene_info_full = {}
gene_info_query = {}
color_hex_dict = {} # will store the color hex of each k code in query {"K14515": }
def load_full_annotation(data_file_path_full):
# imports data from files and stores it into appropriate lists and dict for later access
with open(data_file_path_full, "rb") as f: # open data file that was generated previously
if not pickle.load(f): # reads the first line of saved data which tells whether it is complete or not
messagebox.showerror("Error", "Data is not complete") # exits program if data is not complete
return
for _ in range(pickle.load(f)): # reads line that tells program how many data entries there are in genes
gene = pickle.load(f) # stores the Gene object as gene
if gene.k_code in gene_info_full: # dictionary with the k code as the key and gene object stored in list
gene_info_full[gene.k_code].append(gene) # adds gene to list of other genes with shared k codes in dict
else:
gene_info_full[gene.k_code] = [gene] # adds new k code entry in dict as key with corresponding gene in list
for _ in range(pickle.load(f)): # reads line that tells program how many data entries there are in pathways
pathway = pickle.load(f)
pathway_info_full[pathway.map_code] = pathway # loads data into pathway_info_1
# will simply read the file and return a list with each stripped line as an element
def load_set(set_input_path):
output_list = []
with open(set_input_path, 'r') as f:
for line in f:
output_list.append(line.strip()) # strip to remove unexpected blank spaces / new lines
return output_list
def generate_gene_info_query(): # generates a deep copy of gene_info_full and only keeps genes that are in sets_to_highlight
global gene_info_query
# makes a new copy of gene_info_full that can be changed independently of the original
gene_info_query = copy.deepcopy(gene_info_full)
# removes all genes in gene_info_query that are not in the set to be highlighted, removed genes will appear as
# grey on the pathway
for k_code in gene_info_full:
gene_info_query[k_code] = [gene for gene in gene_info_full[k_code]
if any([gene.gene_num in gene_set for gene_set in sets_to_highlight.values()])]
# removes all k_codes from gene_info_query dict that have no genes in the set to highlight
gene_info_query = dict([(k_code, gene_list) for k_code, gene_list in gene_info_query.items() if gene_list])
def generate_pathway_info_query():
global pathway_info_query
# makes a new copy of pathway_info_full that can be changed independently of the original
pathway_info_query = copy.deepcopy(pathway_info_full)
# empties list of genes involved in pathway from every pathway in query list, so that it may be repopulated with
# only genes from gene_info_query
for map_code in pathway_info_full:
pathway_info_query[map_code].genes_invol = []
for k_code in gene_info_query:
# only have to iterate through one gene object for each k_code since genes with the same k_code will have
# identical link_path
for map_info in gene_info_query[k_code][0].link_path: # map info is [m-code\description, ...]
m_code = map_info[:8] # isolates the map code which is always 8 characters map#####
# calls method to add k_code to genes_invol while keeping the k_code ordered and preventing duplicates
pathway_info_query[m_code].add_gene(k_code)
pathway_info_query = dict([(m_code, pathway) for m_code, pathway in pathway_info_query.items()
if pathway.genes_invol and len(pathway_info_full[m_code].genes_invol) < 104])
# specifies the color of each gene by K code; if more than one color applies, the colors are blended by weight
def generate_color_hex_dict():
for k_code in gene_info_query:
color_list = []
# finds highlight color associated with the gene and adds it to the color list
for gene in gene_info_query[k_code]:
for color in sets_to_highlight.keys():
if gene.gene_num in sets_to_highlight[color]:
color_list.append(color)
# will blend all colors from all genes associated with the k_code
# ex. blue + red = purple; it is weighted by occurrence of color blue + 3 red = dark pink
blend_dict = {} # {color_hex: weight, color_hex: weight, ...} where weight is number of occurrence
for color in set(color_list):
blend_dict[color] = color_list.count(color)
color_hex_dict[k_code] = combine_hex_values(blend_dict)
# for every k_code that is in the full annotation, but not included in any sets to highlight
for k_code in gene_info_full:
if k_code not in color_hex_dict.keys():
color_hex_dict[k_code] = "d9d9d9" # stores a hex for a light grey color
# accepts a dict of {color_hex: weight, color_hex: weight, ...} and returns a color hex that blends all the colors
def combine_hex_values(d):
d_items = sorted(d.items())
tot_weight = sum(d.values())
red = int(sum([int(k[:2], 16)*v for k, v in d_items])/tot_weight)
green = int(sum([int(k[2:4], 16)*v for k, v in d_items])/tot_weight)
blue = int(sum([int(k[4:6], 16)*v for k, v in d_items])/tot_weight)
zpad = lambda x: x if len(x)==2 else '0' + x
return zpad(hex(red)[2:]) + zpad(hex(green)[2:]) + zpad(hex(blue)[2:])
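# Example (hypothetical input): combine_hex_values({"ff0000": 3, "0000ff": 1})
# returns "bf003f" -- the weights act as occurrence counts, so the blend leans red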
def _new_generate_url(self): #
self.url = "http://www.kegg.jp/kegg-bin/show_pathway?map=" + self.map_code + "&multi_query=" # sets the which pathway to use
for k_code in self.genes_invol: # goes through each unique k_code in pathway
self.url = self.url + k_code + "+%23" + color_hex_dict[k_code] + "%0a" # adds gene's k_code to the end of the url and specifies color
return self.url
Pathway_MAP.generate_url = _new_generate_url # overrides the generate_url method to include unique color value
def make_html_table_rows(sorted_m_codes):
html_rows = ""
for m_code in sorted_m_codes:
# generates the text for the hyperlink (name + num of associated genes + percentage of genes out of total)
query_pathway = pathway_info_query[m_code]
full_pathway = pathway_info_full[m_code]
hyper_text = query_pathway.name + "(" + str(len(query_pathway.genes_invol)) + ", " + \
"{:.2f}%".format(len(query_pathway.genes_invol) / len(full_pathway.genes_invol) * 100) + ")"
url = full_pathway.generate_url() # generates the url
hyperlink = "<a href=\"" + url + "\">" + hyper_text + "</a>" # embeds hyperlink to text
row = "<tr><td>" + hyperlink + "</td></tr>"
html_rows = html_rows + row
return html_rows
class PathwayInfo:
query_pathway: Pathway_MAP
full_pathway: Pathway_MAP
percentage: float
def __init__(self, query_pathway, full_pathway, color_hex, percentage=0):
self.query_pathway = query_pathway
self.full_pathway = full_pathway
self.color_hex = color_hex  # store the passed-in highlight color so it is not silently dropped
self.percentage = percentage
def generate_html(output_path):
with open(output_path, 'w+') as f:
html_code = """<html>
<head>
<style>
html {
/*width: 100%;*/ /*required if using % width*/
/*height: 100%;*/ /*required if using % height*/
}
body {
/*width: 100%;*/ /*required if using % width*/
/*height: 100%;*/ /*required if using % height*/
/*margin: 0;*/ /*required if using % width or height*/
/*padding: 0 20px 0 20px;*/ /*purely aesthetic, not required*/
/*box-sizing: border-box;*/ /*required if using above declaration*/
background: white;
text-align: center; /*delete if using % width, or you don't want table centered*/
}
.scrollingtable {
box-sizing: border-box;
display: inline-block;
vertical-align: middle;
overflow: hidden;
width: auto; /*set table width here if using fixed value*/
/*min-width: 100%;*/ /*set table width here if using %*/
height: 600px; /*set table height here; can be fixed value or %*/
/*min-height: 104px;*/ /*if using % height, make this at least large enough to fit scrollbar arrows + captions + thead*/
font-family: Verdana, Tahoma, sans-serif;
font-size: 15px;
line-height: 20px;
padding-top: 20px; /*this determines top caption height*/
padding-bottom: 20px; /*this determines bottom caption height*/
text-align: left;
}
.scrollingtable * {box-sizing: border-box;}
.scrollingtable > div {
position: relative;
border-top: 1px solid black; /*top table border*/
height: 100%;
padding-top: 20px; /*this determines column header height*/
}
.scrollingtable > div:before {
top: 0;
background: cornflowerblue; /*column header background color*/
}
.scrollingtable > div:before,
.scrollingtable > div > div:after {
content: "";
position: absolute;
z-index: -1;
width: 100%;
height: 50%;
left: 0;
}
.scrollingtable > div > div {
/*min-height: 43px;*/ /*if using % height, make this at least large enough to fit scrollbar arrows*/
max-height: 100%;
overflow: scroll; /*set to auto if using fixed or % width; else scroll*/
overflow-x: hidden;
border: 1px solid black; /*border around table body*/
}
.scrollingtable > div > div:after {background: white;} /*match page background color*/
.scrollingtable > div > div > table {
width: 100%;
border-spacing: 0;
margin-top: -20px; /*inverse of column header height*/
/*margin-right: 17px;*/ /*uncomment if using % width*/
}
.scrollingtable > div > div > table > caption {
position: absolute;
top: -20px; /*inverse of caption height*/
margin-top: -1px; /*inverse of border-width*/
width: 100%;
font-weight: bold;
text-align: center;
}
.scrollingtable > div > div > table > * > tr > * | |
import pandas
import os
import math
import numpy
from scipy import stats
pandas.set_option('display.max_rows', 200, 'display.max_columns', 200) # change it to see more or less rows and/or columns
# Ask inputs to read the TSV file
dirPath = input('Enter path to TSV file: ')
inputName = input('Enter TSV name (input file): ')
# Read input file
inputTSV = pandas.read_csv(os.path.join(dirPath, inputName), sep = '\t', index_col = False, dtype = object)
print('The input TSV has ' + str(len(inputTSV)) + ' variants.')
## Show to the user the options to filter by consequence and impact
print('The different consequence in this input are: ' + ', '.join(inputTSV.Consequence.unique()))
print('The different impact in this input are: ' + ', '.join(inputTSV.IMPACT.unique()))
## Ask inputs to filter
snp_indel_filter = input('Enter "snp", "indel" or "snp|indel": ')
consequence_filter = input('Enter consequence filter (e.g. missense_variant, stop_gained, synonymous_variant), to use multiple consequence separate them with "|", or "all" for all the consequences: ')
impact_filter = input('Enter impact filter (e.g. HIGH, MODERATE, LOW, MODIFIER), to use multiple impact separate them with "|", or "all" for all the impact: ')
gnomADg_AF_nfe_filter = float(input('Enter AF to filter by gnomAD NFE (e.g. 0.05): '))
gnomADg_AF_filter = float(input('Enter AF to filter by gnomAD all population (e.g. 0.05): '))
CSVS_AF_filter = float(input('Enter AF to filter by CSVS (e.g. 0.05): '))
# Transform some columns to float
colsToFloat = ['CADD_PHRED', 'CADD_RAW', 'gnomADg_AF_nfe', 'gnomADg_AF', 'CSVS_AF', 'AF']
for col in colsToFloat:
inputTSV[col] = inputTSV[col].astype(float)
dtypes = dict(inputTSV.dtypes) # Check the types of each column
# Filter variants without symbol
inputTSV_SYMBOL = inputTSV[inputTSV['SYMBOL'].notnull()]
print('After filtering by those variants with gene name there are: ' + str(len(inputTSV_SYMBOL)) + ' variants')
# Filter by SNPs/indels
nt = ['A', 'T', 'C', 'G']
if snp_indel_filter == 'snp':
inputTSV_nt = inputTSV_SYMBOL[inputTSV_SYMBOL['REF'].isin(nt) & inputTSV_SYMBOL['ALT'].isin(nt)]
inputTSV_nt.REF.unique()
inputTSV_nt.ALT.unique()
elif snp_indel_filter == 'indel':
inputTSV_nt = inputTSV_SYMBOL[~inputTSV_SYMBOL['REF'].isin(nt) | ~inputTSV_SYMBOL['ALT'].isin(nt)]
elif snp_indel_filter == 'snp|indel':
inputTSV_nt = inputTSV_SYMBOL
else:
print('Bad snp/indel filter introduced, the options are: "snp", "indel" or "snp|indel"')
print('After filtering by ' + snp_indel_filter + ' there are: ' + str(len(inputTSV_nt)) + ' variants')
# Filter by variant type (Consequence)
if consequence_filter == 'all':
consequenceDF = inputTSV_nt
else:
consequenceDF = inputTSV_nt[inputTSV_nt['Consequence'].str.contains(consequence_filter)]
print('After filtering by the consequence(s) ' + consequence_filter + ' there are: ' + str(len(consequenceDF)) + ' variants')
# print(consequenceDF.REF.unique())
# print(consequenceDF.ALT.unique())
# Filter by impact
if impact_filter == 'all':
consequence_impactDF = consequenceDF
else:
consequence_impactDF = consequenceDF[consequenceDF['IMPACT'].str.contains(impact_filter)]
print('After filtering by the impact(s) ' + impact_filter + ' there are: ' + str(len(consequence_impactDF)) + ' variants')
# Filter by AF
## SNVs without data in gnomADg or CSVS need placeholder values so the GBA can still include them
## Variant without data will be: AC = 0, AF = 0, AN = mean(AN in gene)
### Create table with meanAN for each genes (all SNVs, before filters)
#### gnomAD
cols_meanAN_gnomAD = ['SYMBOL', 'gnomADg_AN_nfe', 'gnomADg_AN']
meanAN_DF_gnomAD = inputTSV[cols_meanAN_gnomAD] # DF with less columns
meanAN_DF_gnomAD = meanAN_DF_gnomAD[meanAN_DF_gnomAD['gnomADg_AN_nfe'].notnull()] # Variants with value
print('There are ' + str(len(meanAN_DF_gnomAD)) + ' variants with value in gnomAD')
colsToNumeric_gnomAD = ['gnomADg_AN_nfe', 'gnomADg_AN'] # Transform columns to numeric
for col in colsToNumeric_gnomAD:
meanAN_DF_gnomAD[col] = meanAN_DF_gnomAD[col].astype('int32')
meanAN_DF_gnomAD = meanAN_DF_gnomAD.groupby(by = ['SYMBOL']).mean() # Calculate the mean for each gene
meanAN_DF_gnomAD = meanAN_DF_gnomAD.round(0).astype(int) # Number without decimals
meanAN_DF_gnomAD = meanAN_DF_gnomAD.reset_index() # Reset index to avoid errors
print('There are ' + str(len(meanAN_DF_gnomAD)) + ' genes with value in gnomAD')
#### CSVS
cols_meanAN_CSVS = ['SYMBOL', 'CSVS_AN']
meanAN_DF_CSVS = inputTSV[cols_meanAN_CSVS]
meanAN_DF_CSVS = meanAN_DF_CSVS[meanAN_DF_CSVS['CSVS_AN'].notnull()]
print('There are ' + str(len(meanAN_DF_CSVS)) + ' variants with value in CSVS')
colsToNumeric_CSVS = ['CSVS_AN']
for col in colsToNumeric_CSVS:
meanAN_DF_CSVS[col] = meanAN_DF_CSVS[col].astype('int32')
meanAN_DF_CSVS = meanAN_DF_CSVS.groupby(by = ['SYMBOL']).mean()
meanAN_DF_CSVS = meanAN_DF_CSVS.round(0).astype(int)
meanAN_DF_CSVS = meanAN_DF_CSVS.reset_index()
print('There are ' + str(len(meanAN_DF_CSVS)) + ' genes with value in CSVS')
### Merge gnomAD and CSVS
meanAN_DF = pandas.merge(meanAN_DF_gnomAD, meanAN_DF_CSVS, how = 'left', left_on = 'SYMBOL', right_on = 'SYMBOL')
#### Genes without value in both databases
symbol_diff = list(set(inputTSV.SYMBOL.unique().tolist()).difference(set(meanAN_DF.SYMBOL.unique().tolist())))
for symb in symbol_diff:
meanAN_DF = meanAN_DF.append({'SYMBOL': symb}, ignore_index = True)
### If there is a gene without any value use as AN the mean of all the genes for the same database
for c in meanAN_DF.columns[1:]:
for r in meanAN_DF.index:
if math.isnan(meanAN_DF.loc[r,c]):
# print('There is a nan in the column: ' + str(c) + ' and in the row: ' + str(r))
meanAN_col = numpy.mean(meanAN_DF[c])
meanAN_DF.loc[r,c] = meanAN_col.round(0).astype(int)
colsToNumeric_meanAN_DF = ['gnomADg_AN_nfe', 'gnomADg_AN', 'CSVS_AN'] # Transform columns to numeric
for col in colsToNumeric_meanAN_DF:
meanAN_DF[col] = meanAN_DF[col].astype('int32')
## Add values to DF
consequence_impact_AFnfe_AFallDF = consequence_impactDF
for r in consequence_impact_AFnfe_AFallDF.index:
if pandas.isnull(consequence_impact_AFnfe_AFallDF['gnomADg'][r]) == True:
# AF & AC
consequence_impact_AFnfe_AFallDF.at[r, 'gnomADg_AF_nfe'] = 0
consequence_impact_AFnfe_AFallDF.at[r, 'gnomADg_AC_nfe'] = 0
consequence_impact_AFnfe_AFallDF.at[r, 'gnomADg_AF'] = 0
consequence_impact_AFnfe_AFallDF.at[r, 'gnomADg_AC'] = 0
# AN
rGene = consequence_impact_AFnfe_AFallDF['SYMBOL'][r]
ANGene_nfe = meanAN_DF.loc[meanAN_DF['SYMBOL'] == rGene, 'gnomADg_AN_nfe'].iloc[0]
ANGene = meanAN_DF.loc[meanAN_DF['SYMBOL'] == rGene, 'gnomADg_AN'].iloc[0]
consequence_impact_AFnfe_AFallDF.at[r, 'gnomADg_AN_nfe'] = ANGene_nfe
consequence_impact_AFnfe_AFallDF.at[r, 'gnomADg_AN'] = ANGene
if pandas.isnull(consequence_impact_AFnfe_AFallDF['CSVS'][r]) == True:
# AF & AC
consequence_impact_AFnfe_AFallDF.at[r, 'CSVS_AF'] = 0
consequence_impact_AFnfe_AFallDF.at[r, 'CSVS_AC'] = 0
# AN
rGene = consequence_impact_AFnfe_AFallDF['SYMBOL'][r]
ANGene_CSVS = meanAN_DF.loc[meanAN_DF['SYMBOL'] == rGene, 'CSVS_AN'].iloc[0]
consequence_impact_AFnfe_AFallDF.at[r, 'CSVS_AN'] = ANGene_CSVS
## Filter by AF of gnomAD nfe, gnomAD and CSVS
consequence_impact_AFnfe_AFallDF_AFCSVS = consequence_impact_AFnfe_AFallDF[(consequence_impact_AFnfe_AFallDF['gnomADg_AF_nfe'] < gnomADg_AF_nfe_filter) & (consequence_impact_AFnfe_AFallDF['gnomADg_AF'] < gnomADg_AF_filter) & (consequence_impact_AFnfe_AFallDF['CSVS_AF'] < CSVS_AF_filter)]
print('After filtering by the AF: ' + str(gnomADg_AF_nfe_filter) + ', ' + str(gnomADg_AF_filter) + ', ' + str(CSVS_AF_filter) + ' there are: ' + str(len(consequence_impact_AFnfe_AFallDF_AFCSVS)) + ' variants')
# Create DF as GBA input
## The necessary columns are the gene and the AC and AN for the cases and each database
colsGBA = ['SYMBOL', 'AC', 'AN', 'gnomADg_AC_nfe', 'gnomADg_AN_nfe', 'gnomADg_AC', 'gnomADg_AN', 'CSVS_AC', 'CSVS_AN'] # AN are the total number of alleles
inputGBA_SNV = consequence_impact_AFnfe_AFallDF_AFCSVS[colsGBA]
## Rename cases colnames
colsRenameIndex = [1,2]
namesColsNew = ['AC_cases', 'AN_cases']
namesColsOld = inputGBA_SNV.columns[colsRenameIndex]
inputGBA_SNV.rename(columns = dict(zip(namesColsOld, namesColsNew)), inplace = True)
## Change to integer, except SYMBOL column
for col in inputGBA_SNV.columns[1:]:
inputGBA_SNV[col] = inputGBA_SNV[col].astype(int)
dtypes = dict(inputGBA_SNV.dtypes) # Check the types of each column
## Calculate WT (wild type): WT = total alleles - allele count (variant)
inputGBA_SNV['WT_cases'] = inputGBA_SNV['AN_cases'] - inputGBA_SNV['AC_cases']
inputGBA_SNV['WT_gnomADg_nfe'] = inputGBA_SNV['gnomADg_AN_nfe'] - inputGBA_SNV['gnomADg_AC_nfe']
inputGBA_SNV['WT_gnomADg'] = inputGBA_SNV['gnomADg_AN'] - inputGBA_SNV['gnomADg_AC']
inputGBA_SNV['WT_CSVS'] = inputGBA_SNV['CSVS_AN'] - inputGBA_SNV['CSVS_AC']
## Remove columns with AN
inputGBA_SNV = inputGBA_SNV[inputGBA_SNV.columns.drop(list(inputGBA_SNV.filter(regex = 'AN')))]
## Calculate the sum for each column grouping by gene name
inputGBA = inputGBA_SNV.groupby(by = ['SYMBOL']).sum()
inputGBA = inputGBA.reset_index()
print('The number of genes for the GBA is: ' + str(len(inputGBA)))
# Extract genes with all SNV novel
## It is not possible to perform a gene burden test with values = 0
## Study these separately
novelSNV_genes = inputGBA[(inputGBA['gnomADg_AC_nfe'] == 0) | (inputGBA['gnomADg_AC'] == 0) | (inputGBA['CSVS_AC'] == 0)]
## Save genes with novel SNV
outPath = os.path.join(dirPath, 'GBA')
outName = os.path.join(outPath, os.path.splitext(inputName)[0])
outName_novel = '.'.join([outName, 'GBA', snp_indel_filter.replace('|', '_'), consequence_filter.replace('|', '_'), impact_filter.replace('|', '_'), str(gnomADg_AF_nfe_filter).replace('.', ''), str(gnomADg_AF_filter).replace('.', ''), str(CSVS_AF_filter).replace('.', ''), 'novelSNV', 'tsv'])
novelSNV_genes.to_csv(outName_novel, header = True, index = None, sep = '\t', float_format = '%.16f')
# Odds Ratio
inputGBA_OR = inputGBA
inputGBA_OR['OR_gnomADg_nfe'] = (inputGBA_OR['AC_cases']*inputGBA_OR['WT_gnomADg_nfe'])/(inputGBA_OR['WT_cases']*inputGBA_OR['gnomADg_AC_nfe'])
inputGBA_OR['OR_gnomADg'] = (inputGBA_OR['AC_cases']*inputGBA_OR['WT_gnomADg'])/(inputGBA_OR['WT_cases']*inputGBA_OR['gnomADg_AC'])
inputGBA_OR['OR_CSVS'] = (inputGBA_OR['AC_cases']*inputGBA_OR['WT_CSVS'])/(inputGBA_OR['WT_cases']*inputGBA_OR['CSVS_AC'])
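# The odds ratio per gene follows the standard 2x2 layout (variant vs wild-type alleles,
# cases vs reference database): OR = (AC_cases * WT_db) / (WT_cases * AC_db)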
# Standard error
## Calculate the summary
inputGBA_OR_SE = inputGBA_OR
sumList_nfe = ((1/inputGBA_OR_SE['AC_cases']) + (1/inputGBA_OR_SE['gnomADg_AC_nfe']) + (1/inputGBA_OR_SE['WT_cases']) + (1/inputGBA_OR_SE['WT_gnomADg_nfe'])).tolist()
sumList_gnomAD = ((1/inputGBA_OR_SE['AC_cases']) + (1/inputGBA_OR_SE['gnomADg_AC']) + (1/inputGBA_OR_SE['WT_cases']) + (1/inputGBA_OR_SE['WT_gnomADg'])).tolist()
sumList_CSVS = ((1/inputGBA_OR_SE['AC_cases']) + (1/inputGBA_OR_SE['CSVS_AC']) + (1/inputGBA_OR_SE['WT_cases']) + (1/inputGBA_OR_SE['WT_CSVS'])).tolist()
## Perform the sqrt
SElist_nfe = [math.sqrt(s) for s in sumList_nfe]
SElist_gnomAD = [math.sqrt(s) for s in sumList_gnomAD]
SElist_CSVS = [math.sqrt(s) for s in sumList_CSVS]
inputGBA_OR_SE['SE_gnomADg_nfe'] = SElist_nfe
inputGBA_OR_SE['SE_gnomADg'] = SElist_gnomAD
inputGBA_OR_SE['SE_CSVS'] = SElist_CSVS
# inputGBA['SE_gnomADg_nfe'] = math.sqrt((1/inputGBA['AC_cases']) + (1/inputGBA['gnomADg_AC_nfe']) + (1/inputGBA['WT_cases']) + (1/inputGBA['WT_gnomADg_nfe'])) --> doesn't work because math.sqrt expects a scalar, not a Series; numpy.sqrt would apply element-wise
# Z-Score
inputGBA_OR_SE_Z = inputGBA_OR_SE
inputGBA_OR_SE_Z['ZScore_gnomADg_nfe'] = numpy.log(inputGBA_OR_SE_Z['OR_gnomADg_nfe'])/inputGBA_OR_SE_Z['SE_gnomADg_nfe']
inputGBA_OR_SE_Z['ZScore_gnomADg'] = numpy.log(inputGBA_OR_SE_Z['OR_gnomADg'])/inputGBA_OR_SE_Z['SE_gnomADg']
inputGBA_OR_SE_Z['ZScore_CSVS'] = numpy.log(inputGBA_OR_SE_Z['OR_CSVS'])/inputGBA_OR_SE_Z['SE_CSVS']
# p-value
inputGBA_OR_SE_Z_pv = inputGBA_OR_SE_Z
inputGBA_OR_SE_Z_pv['pvalue_gnomADg_nfe'] = stats.norm.sf(abs(inputGBA_OR_SE_Z['ZScore_gnomADg_nfe']))*2 # using CCDF
inputGBA_OR_SE_Z_pv['pvalue_gnomADg'] = stats.norm.sf(abs(inputGBA_OR_SE_Z['ZScore_gnomADg']))*2
inputGBA_OR_SE_Z_pv['pvalue_CSVS'] = stats.norm.sf(abs(inputGBA_OR_SE_Z['ZScore_CSVS']))*2
### (1 - stats.norm.cdf(abs(inputGBA_OR_SE_Z['ZScore_gnomADg_nfe'])))*2 # using CDF --> same: 1 - CDF = CCDF
# FDR - number of genes
inputGBA_OR_SE_Z_pv['FDR_gnomADg_nfe'] = inputGBA_OR_SE_Z_pv['pvalue_gnomADg_nfe'] * len(inputGBA_OR_SE_Z_pv[inputGBA_OR_SE_Z_pv['gnomADg_AC_nfe'] != 0]) # number of genes in the analysis
inputGBA_OR_SE_Z_pv['FDR_gnomADg'] = inputGBA_OR_SE_Z_pv['pvalue_gnomADg'] * len(inputGBA_OR_SE_Z_pv[inputGBA_OR_SE_Z_pv['gnomADg_AC'] != 0])
inputGBA_OR_SE_Z_pv['FDR_CSVS'] = inputGBA_OR_SE_Z_pv['pvalue_CSVS'] * len(inputGBA_OR_SE_Z_pv[inputGBA_OR_SE_Z_pv['CSVS_AC'] != 0])
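## Note: multiplying each p-value by the number of genes tested is a Bonferroni-style
## adjustment; the resulting column is simply named FDR here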
# Confidence interval
inputGBA_OR_SE_Z_pv_CI = inputGBA_OR_SE_Z_pv
inputGBA_OR_SE_Z_pv_CI['lowCI_gnomADg_nfe'] = numpy.exp(numpy.log(inputGBA_OR_SE_Z_pv['OR_gnomADg_nfe']) - 1.96*inputGBA_OR_SE_Z_pv['SE_gnomADg_nfe'])
inputGBA_OR_SE_Z_pv_CI['highCI_gnomADg_nfe'] = numpy.exp(numpy.log(inputGBA_OR_SE_Z_pv['OR_gnomADg_nfe']) + 1.96*inputGBA_OR_SE_Z_pv['SE_gnomADg_nfe'])
inputGBA_OR_SE_Z_pv_CI['lowCI_gnomADg'] = numpy.exp(numpy.log(inputGBA_OR_SE_Z_pv['OR_gnomADg']) - 1.96*inputGBA_OR_SE_Z_pv['SE_gnomADg'])
inputGBA_OR_SE_Z_pv_CI['highCI_gnomADg'] = numpy.exp(numpy.log(inputGBA_OR_SE_Z_pv['OR_gnomADg']) + 1.96*inputGBA_OR_SE_Z_pv['SE_gnomADg'])
inputGBA_OR_SE_Z_pv_CI['lowCI_CSVS'] = numpy.exp(numpy.log(inputGBA_OR_SE_Z_pv['OR_CSVS']) - 1.96*inputGBA_OR_SE_Z_pv['SE_CSVS'])
inputGBA_OR_SE_Z_pv_CI['highCI_CSVS'] = numpy.exp(numpy.log(inputGBA_OR_SE_Z_pv['OR_CSVS']) + 1.96*inputGBA_OR_SE_Z_pv['SE_CSVS'])
# Reorder columns
colsOrderFinal = ['SYMBOL', 'AC_cases', 'WT_cases', 'gnomADg_AC_nfe', 'WT_gnomADg_nfe', 'OR_gnomADg_nfe', 'SE_gnomADg_nfe', 'ZScore_gnomADg_nfe', 'pvalue_gnomADg_nfe', 'FDR_gnomADg_nfe', 'lowCI_gnomADg_nfe', 'highCI_gnomADg_nfe', 'gnomADg_AC', 'WT_gnomADg', 'OR_gnomADg', 'SE_gnomADg', 'ZScore_gnomADg', 'pvalue_gnomADg', 'FDR_gnomADg', 'lowCI_gnomADg', 'highCI_gnomADg', 'CSVS_AC', 'WT_CSVS', 'OR_CSVS', 'SE_CSVS', 'ZScore_CSVS', 'pvalue_CSVS', 'FDR_CSVS', 'lowCI_CSVS', 'highCI_CSVS']
GBA = inputGBA_OR_SE_Z_pv_CI[colsOrderFinal]
# Filter by FDR < 0.05 in each database and combining them
GBA_nfe = GBA[GBA['FDR_gnomADg_nfe'] < 0.05]
GBA_gnomAD = GBA[GBA['FDR_gnomADg'] < 0.05]
GBA_CSVS = GBA[GBA['FDR_CSVS'] < 0.05]
GBA_gnomAD_all = GBA[(GBA['FDR_gnomADg_nfe'] < 0.05) & (GBA['FDR_gnomADg'] < 0.05)]
GBA_all = GBA[(GBA['FDR_gnomADg_nfe'] < 0.05) & (GBA['FDR_gnomADg'] < 0.05) & (GBA['FDR_CSVS'] < 0.05)]
# Print the results of the GBA
print('The GBA is done. Filtering by FDR < 0.05 for the following databases, the next number of genes were enriched:')
print('gnomAD NFE: ' + str(len(GBA_nfe)) + ' genes')
print('gnomAD all population: ' + str(len(GBA_gnomAD)) | |
pyuvdata.
"""
pol_labels = ['NS','EW']
plt.subplots_adjust(wspace=0.25)
uv = UVData()
uv.read_uvh5(file)
baseline_groups = get_baseline_groups(uv,use_ants="auto")
freqs = uv.freq_array[0]/1000000
loc = EarthLocation.from_geocentric(*uv.telescope_location, unit='m')
obstime_start = Time(uv.time_array[0],format='jd',location=loc)
startTime = obstime_start.sidereal_time('mean').hour
JD = int(obstime_start.jd)
j = 0
fig, axs = plt.subplots(len(baseline_groups),2,figsize=(12,4*len(baseline_groups)))
for orientation in baseline_groups:
bls = baseline_groups[orientation]
usable = 0
for i in range(len(bls)):
ants = uv.baseline_to_antnums(bls[i])
if ants[0] in badAnts or ants[1] in badAnts:
continue
if ants[0] in use_ants and ants[1] in use_ants:
usable += 1
if usable <=4:
use_all = True
print(f'Note: not enough baselines of orientation {orientation} - using all available baselines')
elif usable <= 10:
print(f'Note: only a small number of baselines of orientation {orientation} are available')
use_all = False
else:
use_all = False
for p in range(len(pols)):
inter=False
intra=False
pol = pols[p]
for i in range(len(bls)):
ants = uv.baseline_to_antnums(bls[i])
ant1 = ants[0]
ant2 = ants[1]
if (ant1 in use_ants and ant2 in use_ants) or use_all == True:
# key1 = 'HH%i:A' % (ant1)
# n1 = x[key1].get_part_from_type('node')['E<ground'][1:]
# key2 = 'HH%i:A' % (ant2)
# n2 = x[key2].get_part_from_type('node')['E<ground'][1:]
dat = np.mean(np.abs(uv.get_data(ant1,ant2,pol)),0)
auto1 = np.mean(np.abs(uv.get_data(ant1,ant1,pol)),0)
auto2 = np.mean(np.abs(uv.get_data(ant2,ant2,pol)),0)
norm = np.sqrt(np.multiply(auto1,auto2))
dat = np.divide(dat,norm)
if ant1 in badAnts or ant2 in badAnts:
continue
# if n1 == n2:
# if intra is False:
# axs[j][p].plot(freqs,dat,color='blue',label='intranode')
# intra=True
# else:
# axs[j][p].plot(freqs,dat,color='blue')
# else:
# if inter is False:
# axs[j][p].plot(freqs,dat,color='red',label='internode')
# inter=True
# else:
# axs[j][p].plot(freqs,dat,color='red')
axs[j][p].plot(freqs,dat,color='blue')
axs[j][p].set_yscale('log')
axs[j][p].set_title('%s: %s pol' % (orientation,pol_labels[p]))
if j == 0:
axs[len(baseline_groups)-1][p].set_xlabel('Frequency (MHz)')
if p == 0:
axs[j][p].legend()
axs[j][0].set_ylabel('log(|Vij|)')
axs[j][1].set_yticks([])
j += 1
fig.suptitle('Visibility spectra (JD: %i)' % (JD))
fig.subplots_adjust(top=.94,wspace=0.05)
plt.show()
plt.close()
def plot_antenna_positions(uv, badAnts=[],use_ants='auto'):
"""
Plots the positions of all antennas that have data, colored by node.
Parameters
----------
uv: UVData object
Observation to extract antenna numbers and positions from
badAnts: List
A list of flagged or bad antennas. These will be outlined in black in the plot.
use_ants: List or 'auto'
Antennas to draw at full opacity; all other antennas are drawn faded.
"""
plt.figure(figsize=(12,10))
if badAnts is None:
badAnts = []
all_ants = uv.antenna_numbers
# nodes, antDict, inclNodes = generate_nodeDict(uv)
# N = len(inclNodes)
cmap = plt.get_cmap('tab20')
i = 0
# nodePos = geo_sysdef.read_nodes()
# antPos = geo_sysdef.read_antennas()
# ants = geo_sysdef.read_antennas()
# nodes = geo_sysdef.read_nodes()
# firstNode = True
firstAnt = True
for i,a in enumerate(all_ants):
width = 0
widthf = 0
if a in badAnts:
width = 2
x = uv.antenna_positions[i,0]
y = uv.antenna_positions[i,1]
if a in use_ants:
falpha = 0.5
else:
falpha = 0.1
if firstAnt:
if a in badAnts:
if falpha == 0.1:
plt.plot(x,y,marker="h",markersize=40,alpha=falpha,color='b',
markeredgecolor='black',markeredgewidth=0)
plt.annotate(a, [x-1, y])
continue
plt.plot(x,y,marker="h",markersize=40,alpha=falpha,color='b',
markeredgecolor='black',markeredgewidth=0)
if a in badAnts:
plt.plot(x,y,marker="h",markersize=40,color='b',
markeredgecolor='black',markeredgewidth=width, markerfacecolor="None")
else:
if falpha == 0.1:
plt.plot(x,y,marker="h",markersize=40,alpha=falpha,color='b',
markeredgecolor='black',markeredgewidth=0)
plt.annotate(a, [x-1, y])
continue
plt.plot(x,y,marker="h",markersize=40,alpha=falpha,color='b',
markeredgecolor='black',markeredgewidth=width)
firstAnt = False
else:
plt.plot(x,y,marker="h",markersize=40,alpha=falpha,color='b',
markeredgecolor='black',markeredgewidth=0)
if a in badAnts and a in use_ants:
plt.plot(x,y,marker="h",markersize=40,color='b',
markeredgecolor='black',markeredgewidth=width, markerfacecolor="None")
plt.annotate(a, [x-1, y])
plt.xlabel('East')
plt.ylabel('North')
plt.show()
plt.close()
def plot_lst_coverage(uvd):
"""
Plots the LST and JD coverage for a particular night.
Parameters
----------
uvd: UVData Object
Object containing a whole night of data, used to extract the time array.
"""
lsts = uvd.lst_array*3.819719
jds = np.unique(uvd.time_array)
alltimes = np.arange(np.floor(jds[0]),np.ceil(jds[0]),jds[2]-jds[1])
df = jds[2]-jds[1]
truetimes = [np.min(np.abs(jds-jd))<=df*0.6 for jd in alltimes]
usetimes = np.tile(np.asarray(truetimes),(20,1))
fig = plt.figure(figsize=(20,2))
ax = fig.add_subplot()
im = ax.imshow(usetimes, aspect='auto',cmap='RdYlGn',vmin=0,vmax=1,interpolation='nearest')
fig.colorbar(im)
ax.set_yticklabels([])
ax.set_yticks([])
if len(alltimes) <= 15:
xticks = [int(i) for i in np.linspace(0,len(alltimes)-1,len(alltimes))]
else:
xticks = [int(i) for i in np.linspace(0,len(alltimes)-1,14)]
ax.set_xticks(xticks)
ax.set_xticklabels(np.around(alltimes[xticks],2))
ax.set_xlabel('JD')
ax.set_title('LST (hours)')
ax2 = ax.twiny()
ax2.set_xticks(xticks)
jds = alltimes[xticks]
lstlabels = []
loc = EarthLocation.from_geocentric(*uvd.telescope_location, unit='m')
for jd in jds:
t = Time(jd,format='jd',location=loc)
lstlabels.append(t.sidereal_time('mean').hour)
ax2.set_xticklabels(np.around(lstlabels,2))
ax2.set_label('LST (hours)')
ax2.tick_params(labelsize=12)
plt.show()
plt.close()
def calcEvenOddAmpMatrix(sm,pols=['xx','yy'],nodes='auto', badThresh=0.25, plotRatios=False):
"""
Calculates a matrix of phase correlations between antennas, where each pixel is calculated as (even/abs(even)) * (conj(odd)/abs(odd)), and then averaged across time and frequency.
Parameters:
----------
sm: UVData Object
Observation to calculate the metric from; even and odd time samples are taken by interleaving this object's times.
pols: List
Polarizations to plot. Can include any polarization strings accepted by pyuvdata.
nodes: String or List
Nodes to include in matrix. Default is 'auto', which generates a list of all nodes included in the provided data files.
badThresh: Float
Threshold correlation metric value to use for flagging bad antennas.
plotRatios: Bool
If True, also compute differences between same-pol and cross-pol metrics (requires all four pols).
Returns:
-------
data: Dict
Dictionary containing calculated values, formatted as data[polarization][ant1,ant2].
badAnts: List
List of antennas that were flagged as bad based on badThresh.
"""
nants = len(sm.get_ants())
data = {}
antnumsAll = sorted(sm.get_ants())
badAnts = []
for p in range(len(pols)):
pol = pols[p]
data[pol] = np.empty((nants,nants))
for i in range(len(antnumsAll)):
thisAnt = []
for j in range(len(antnumsAll)):
ant1 = antnumsAll[i]
ant2 = antnumsAll[j]
dat = sm.get_data(ant1,ant2,pol)
even = dat[::2,:]
# print('Even')
# print(even)
odd = dat[1::2,:]
# s = sm.get_data(ant1,ant2,pol)
# d = df.get_data(ant1,ant2,pol)
# even = (s + d)/2
even = np.divide(even,np.abs(even))
# print('Even norm')
# print(even)
# odd = (s - d)/2
odd = np.divide(odd,np.abs(odd))
product = np.multiply(even,np.conj(odd))
# print('product')
# print(product)
data[pol][i,j] = np.abs(np.nanmean(product))
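# values near 1 indicate a phase-stable even/odd correlation; values near 0 indicate noise-like data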
thisAnt.append(np.abs(np.mean(product)))
pgood = np.count_nonzero(~np.isnan(thisAnt))/len(thisAnt)
if (np.nanmedian(thisAnt) < badThresh or pgood<0.2) and antnumsAll[i] not in badAnts:
if pol[0]==pol[1]:
#Don't assign bad ants based on cross pols
badAnts.append(antnumsAll[i])
if plotRatios is True:
if len(pols) == 4:
data['xx-xy'] = np.subtract(data['xx'],data['xy'])
data['xx-yx'] = np.subtract(data['xx'],data['yx'])
data['yy-xy'] = np.subtract(data['yy'],data['xy'])
data['yy-yx'] = np.subtract(data['yy'],data['yx'])
else:
print('Can only calculate differences if cross pols were specified')
polAnts = {}
badAnts = []
subs = ['xx-xy','xx-yx','yy-xy','yy-yx']
for k in subs:
for i,ant in enumerate(antnumsAll):
dat = data[k][i,:]
if np.nanmedian(dat) < 0:
if ant in polAnts.keys():
polAnts[ant] = polAnts[ant] + 1
else:
polAnts[ant] = 1
if polAnts[ant] == 4:
badAnts.append(ant)
return data, badAnts
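# A minimal usage sketch (assumes `sm` is a UVData object holding interleaved even/odd time samples):
# data, badAnts = calcEvenOddAmpMatrix(sm, pols=['xx','yy'])
# plotCorrMatrix(sm, data, pols=['xx','yy'])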
def plotCorrMatrix(uv,data,pols=['xx','yy'],vminIn=0,vmaxIn=1,nodes='auto',logScale=False,plotRatios=False):
"""
Plots a matrix representing the phase correlation of each baseline.
Parameters:
----------
uv: UVData Object
Observation used for calculating the correlation metric
data: Dict
Dictionary containing the correlation metric for each baseline and each polarization. Formatted as data[polarization] [ant1,ant2]
pols: List
Polarizations to plot. Can include any polarization strings accepted by pyuvdata.
vminIn: float
Lower limit of colorbar. Default is 0.
vmaxIn: float
Upper limit of colorbar. Default is 1.
nodes: Dict
Dictionary containing the nodes (and their constituent antennas) to include in the matrix. Formatted as nodes[Node #][Ant List, Snap # List, Snap Location List].
logScale: Bool
Option to put colormap on a logarithmic scale. Default is False.
"""
# if nodes=='auto':
# nodeDict, antDict, inclNodes = generate_nodeDict(uv)
nantsTotal = len(uv.get_ants())
power = np.empty((nantsTotal,nantsTotal))
fig, axs = plt.subplots(2,2,figsize=(16,16))
dirs = ['NN','EE','NE','EN']
cmap='plasma'
if plotRatios is True:
pols = ['xx-xy','yy-xy','xx-yx','yy-yx']
dirs=pols
vminIn=-1
cmap='seismic'
loc = EarthLocation.from_geocentric(*uv.telescope_location, unit='m')
jd = uv.time_array[0]
t = Time(jd,format='jd',location=loc)
lst = round(t.sidereal_time('mean').hour,2)
t.format='fits'
antnumsAll = sorted(uv.get_ants())
i = 0
for p in range(len(pols)):
if p >= 2:
i=1
pol = pols[p]
nants = len(antnumsAll)
if logScale is True:
im = axs[i][p%2].imshow(data[pol],cmap=cmap,origin='upper',extent=[0.5,nantsTotal+.5,0.5,nantsTotal+0.5],norm=LogNorm(vmin=vminIn, vmax=vmaxIn))
else:
im = axs[i][p%2].imshow(data[pol],cmap=cmap,origin='upper',extent=[0.5,nantsTotal+.5,0.5,nantsTotal+0.5],vmin=vminIn, vmax=vmaxIn)
axs[i][p%2].set_xticks(np.arange(0,nantsTotal)+1)
axs[i][p%2].set_xticklabels(antnumsAll,rotation=90,fontsize=6)
axs[i][p%2].xaxis.set_ticks_position('top')
axs[i][p%2].set_title('polarization: ' + dirs[p] + '\n')
# n=0
# n=0
# for node in sorted(inclNodes):
# n += len(nodeDict[node]['ants'])
# axs[0][1].text(nantsTotal+1,nantsTotal-n+len(nodeDict[node]['ants'])/2,node)
# axs[1][1].text(nantsTotal+1,nantsTotal-n+len(nodeDict[node]['ants'])/2,node)
# axs[0][1].text(1.05,0.4,'Node Number',rotation=270,transform=axs[0][1].transAxes)
axs[0][1].set_yticklabels([])
axs[0][1].set_yticks([])
axs[0][0].set_yticks(np.arange(nantsTotal,0,-1))
axs[0][0].set_yticklabels(antnumsAll,fontsize=6)
axs[0][0].set_ylabel('Antenna Number')
# axs[1][1].text(1.05,0.4,'Node Number',rotation=270,transform=axs[1][1].transAxes)
axs[1][1].set_yticklabels([])
axs[1][1].set_yticks([])
axs[1][0].set_yticks(np.arange(nantsTotal,0,-1))
axs[1][0].set_yticklabels(antnumsAll,fontsize=6)
axs[1][0].set_ylabel('Antenna Number')
cbar_ax = fig.add_axes([0.98,0.18,0.015,0.6])
cbar_ax.set_xlabel('|V|', rotation=0)
cbar = fig.colorbar(im, cax=cbar_ax)
fig.suptitle('Correlation Matrix - JD: %s, LST: %.0fh' % (str(jd),np.round(lst,0)))
fig.subplots_adjust(top=1.28,wspace=0.05,hspace=1.1)
fig.tight_layout(pad=2)
plt.show()
plt.close()
def plot_single_matrix(uv,data,pol='xx',vminIn=0,vmaxIn=1,nodes='auto',logScale=False): # pol added so data[pol] below is defined
if nodes=='auto':
nodeDict, antDict, inclNodes = generate_nodeDict(uv)
nantsTotal = len(uv.get_ants())
power = np.empty((nantsTotal,nantsTotal))
fig, axs = plt.subplots(1,1,figsize=(16,16),squeeze=False) # keep axs 2-D so the axs[0][0] indexing below works
loc = EarthLocation.from_geocentric(*uv.telescope_location, unit='m')
jd = uv.time_array[0]
t = Time(jd,format='jd',location=loc)
lst = round(t.sidereal_time('mean').hour,2)
t.format='fits'
antnumsAll = sort_antennas(uv)
nants = len(antnumsAll)
if logScale is True:
im = axs[0][0].imshow(data[pol],cmap='plasma',origin='upper',
extent=[0.5,nantsTotal+.5,0.5,nantsTotal+0.5],norm=LogNorm(vmin=vminIn, vmax=vmaxIn))
else:
im = axs[0][0].imshow(data[pol],cmap='plasma',origin='upper',extent=
[0.5,nantsTotal+.5,0.5,nantsTotal+0.5],vmin=vminIn, vmax=vmaxIn)
axs[0][0].set_xticks(np.arange(0,nantsTotal)+1)
axs[0][0].set_xticklabels(antnumsAll,rotation=90,fontsize=6)
axs[0][0].xaxis.set_ticks_position('top')
axs[0][0].set_title('polarization: ' + pol + '\n')
n=0
for node in sorted(inclNodes):
n += len(nodeDict[node]['ants'])
axs[0][0].axhline(len(antnumsAll)-n+.5,lw=4)
axs[0][0].axvline(n+.5,lw=4)
axs[0][0].text(n-len(nodeDict[node]['ants'])/2,-.5,node)
axs[0][0].text(.42,-.05,'Node Number',transform=axs[0][0].transAxes)
n=0
for node in sorted(inclNodes):
n += len(nodeDict[node]['ants'])
axs[0][0].text(nantsTotal+1,nantsTotal-n+len(nodeDict[node]['ants'])/2,node)
axs[0][0].text(1.05,0.4,'Node Number',rotation=270,transform=axs[0][0].transAxes)
axs[0][0].set_yticks(np.arange(nantsTotal,0,-1))
axs[0][0].set_yticklabels(antnumsAll,fontsize=6)
axs[0][0].set_ylabel('Antenna Number')
axs[0][0].text(1.05,0.4,'Node Number',rotation=270,transform=axs[0][0].transAxes)
cbar_ax = fig.add_axes([0.98,0.18,0.015,0.6])
cbar_ax.set_xlabel('|V|', rotation=0)
cbar = fig.colorbar(im, cax=cbar_ax)
fig.suptitle('Correlation Matrix - JD: %s, LST: %.0fh' % (str(jd),np.round(lst,0)))
fig.subplots_adjust(top=1.28,wspace=0.05,hspace=1.1)
fig.tight_layout(pad=2)
plt.show()
plt.close()
def get_hourly_files(uv, HHfiles, jd):
"""
Generates a list of files spaced one hour apart throughout a night of observation, and the times those files were observed.
Parameters:
----------
| |
#
#
# Copyright (C) University of Melbourne 2012
#
#
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
#
"""mureilbuilder.py collects functions that build a MUREIL simulation.
The intended use from the top-level is to call build_master with the command-line flags,
which will process any configuration files (identified as -f file), and any command-line
overrides, as listed in the read_flags function.
"""
import sys, os
import ConfigParser
import argparse
from tools import mureilbase, mureilexception
import importlib
import logging
import string
import copy
import types
import ast
import numpy
logger = logging.getLogger(__name__)
def build_master(raw_flags, extra_data = None):
"""Build the simulation master, using the flags from the command line or
elsewhere. Intended to be called from runmureil.py or other simple function.
An exception of base type MureilException will be raised in case of error.
Inputs:
raw_flags: flags from command line sys.argv[1:], or a list of strings
such as ['-f', 'config.txt']
extra_data: arbitrary extra data, if required.
Outputs:
master: a completely configured simulation master, ready to run.
"""
files, conf_list = read_flags(raw_flags)
full_config = accum_config_files(files)
master = create_master_instance(full_config, conf_list, extra_data)
return master
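# A minimal usage sketch (the config file name and the master's run() method are
# assumptions here, not part of this module):
# master = build_master(['-f', 'config.txt', '--iterations', '100'])
# master.run()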
def read_config_file(filename):
"""Take in a filename and parse the file sections to a nested dict object.
Keyword arguments:
filename -- a string for the filename to read
Returns:
config -- a nested dict containing the sections (identified by []) in the configuration file, with
each section dict containing its parameters.
"""
parsed_config = ConfigParser.RawConfigParser()
read_file = parsed_config.read(filename)
# ConfigParser.read returns a list of the filenames that were successfully read
if read_file == []:
msg = 'Configuration file ' + filename + ' not opened.'
logger.critical(msg)
raise mureilexception.ConfigException(msg, {})
config = {}
for section in parsed_config.sections():
this_list = {}
for item in parsed_config.items(section):
this_list[item[0]] = item[1]
config[section] = this_list
return config
def read_flags(flags, alt_args_list=None):
"""Process the command-line flags.
This:
1) collects a list of configuration file names
2) processes the logging flags, and initialises the logger
3) collects the remaining flags which modify the simulation configuration
Inputs:
flags: a list of strings, for example ['-f', 'config.txt'], as would
come from sys.argv[1:] as command-line arguments. Further
details below.
alt_args_list: optional. A dict specifying parameters that modify
the configuration extracted from the files. A default is provided.
Further details below.
Outputs:
files: a list of configuration filenames
conf_list: a list of tuples of format ((section, param_name), param_value)
as extracted from the flags using the args list.
Details on flags:
-f filename or --file filename: filename of a configuration file. Any number
of these can be specified and will be applied in order of listing.
Logging: see do_logger_setup below for more details.
-l filename or --logfile filename: filename of the logfile.
-d level or --debuglevel level: set the debuglevel.
--logmodulenames: if set (no value needed), log extra information.
Default extra arguments:
--iterations number: Set the number of iterations
--seed number: Set the random seed for the simulation.
--pop_size number: Set the population size, if a genetic algorithm.
--processes number: Number of processes to spawn for parallel processing
--output_file filename: Name of file to write output to
--do_plots {True|False}: Draw pretty pictures when done
--run_periods periods: Set the periods to run in a multi-period sim. Surround
the list of periods in double-quotes e.g. --run_periods "2010 2020".
Details on alt_args_list format:
args_list is a dict.
The key is the command line argument. At the command line, type
--name value, e.g. --iterations 10
The value is a tuple identifying where in the configuration to find the
parameter to be modified, format (object, param_name).
object is 'Master' for the master, or the name in the first position in
the corresponding tuple in the master's get_config_spec for the others,
for example 'algorithm' (note not 'Algorithm').
The param_name is the name of the parameter to modify as listed in the
object's get_config_spec function.
"""
if alt_args_list:
args_list = alt_args_list
else:
# See notes in docstring for function about args_list format
args_list = {'iterations': ('Master', 'iterations'),
'seed': ('algorithm', 'seed'),
'pop_size': ('algorithm', 'pop_size'),
'optim_type': ('Master', 'optim_type'),
'processes': ('algorithm', 'processes'),
'output_file' : ('Master', 'output_file'),
'do_plots' : ('Master', 'do_plots'),
'run_periods': ('Master', 'run_periods')}
parser = argparse.ArgumentParser()
for arg in args_list:
parser.add_argument('--' + arg)
parser.add_argument('-f', '--file', action='append')
parser.add_argument('-l', '--logfile')
parser.add_argument('-d', '--debuglevel')
parser.add_argument('--logmodulenames', action='store_true', default=False)
args = parser.parse_args(flags)
dict_args = vars(args)
logger_config = {}
for arg in ['logfile', 'debuglevel', 'logmodulenames']:
logger_config[arg] = dict_args.pop(arg)
do_logger_setup(logger_config)
files = dict_args.pop('file')
conf_list = []
# Build up a list of ((section, param_name), value) tuples to
# describe the modifications to the configuration.
for item in dict_args.keys():
val = dict_args[item]
if val is not None:
conf_tup = (args_list[item], val)
conf_list.append(conf_tup)
return files, conf_list
def do_logger_setup(logger_config):
"""Set up the simulation logger.
Inputs:
logger_config: a dict with members (all optional) of:
debuglevel: the Python logging level - defaults to INFO. One of
DEBUG, INFO, WARNING, ERROR, CRITICAL.
logmodulenames: if True, log the name of the module that the log
message is sourced from.
logfile: the full path of the log file.
Outputs:
None
"""
for arg in ['debuglevel', 'logmodulenames', 'logfile']:
if arg not in logger_config:
logger_config[arg] = None
# First, remove any existing handlers
root = logging.getLogger()
if root.handlers:
handlers = copy.copy(root.handlers)
for handler in handlers:
root.removeHandler(handler)
if logger_config['debuglevel'] is None:
debuglevel = 'INFO'
else:
debuglevel = logger_config['debuglevel']
numeric_level = getattr(logging, debuglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid debug level: %s' % debuglevel)
# Now create the new handler
if logger_config['logmodulenames']:
format_string = '%(levelname)-8s : %(name)s : %(message)s'
else:
format_string = '%(levelname)-8s : %(message)s'
formatter = logging.Formatter(format_string)
if logger_config['logfile'] is not None:
handler = logging.FileHandler(logger_config['logfile'], mode='w')
else:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
root.addHandler(handler)
root.setLevel(numeric_level)
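# Example (a sketch): log at DEBUG level to a file, including module names
# do_logger_setup({'debuglevel': 'DEBUG', 'logmodulenames': True, 'logfile': 'mureil.log'})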
def accum_config_files(files):
"""Build a configuration structure from the list of files provided.
Inputs:
files: a list of string filenames, which are applied in turn, with the
later files overwriting parameters already present.
Outputs:
config: a nested dict, with each section, identified by the [section_name]
in the file, and each section a dict of parameter : value, where
each value is a string.
File format example (shortened - see each module's documentation and
get_config_spec function for details of parameters):
[Master]
model: master.simplemureilmaster.SimpleMureilMaster
iterations: 1000
algorithm: Algorithm
[Algorithm]
model: algorithm.geneticalgorithm.Engine
base_mute: 0.01
processes: 0
seed: 12345
and the resulting output:
config = {'Master': {'model': 'master.simplemureilmaster.SimpleMureilMaster',
'iterations': '1000', 'algorithm': 'Algorithm'}, 'Algorithm': {'model':
'algorithm.geneticalgorithm.Engine', 'base_mute': '0.01', 'processes': '0',
'seed': '12345'}}
"""
config = {}
for conf_file in files:
next_conf = read_config_file(conf_file)
for section in next_conf.items():
if section[0] in config:
config[section[0]].update(section[1])
else:
config[section[0]] = section[1]
return config
def apply_flags(full_config, flags):
"""Apply the modifiers are defined in the flags to the specified parameters within
the full_config structure.
Inputs:
full_config: a nested dict - see accum_config_files above.
flags: the conf_list output of read_flags above. Format is list of
((section, param_name), value)
Outputs:
None. full_config is modified in-place.
"""
for flag in flags:
pair, value = flag
section, param = pair
if (section == 'Master'):
if param in full_config['Master']:
full_config['Master'][param] = value
else:
msg = ('Flag ' + flag + ' | |
## values = numpy.copy(value_array) # we do not wish to change the original value_array in case it needs to be reused in user code
## for cell, val in zip(self, values):
## cell.set_parameters(**{parametername: val})
## elif len(value_array.shape) == len(self.dim[0:self.actual_ndim])+1: # the values are themselves 1D arrays
## for cell,addr in zip(self.ids(), self.addresses()):
## val = value_array[addr]
## setattr(cell, parametername, val)
## else:
## raise errors.InvalidDimensionsError
##def rset(self, parametername, rand_distr):
## """
## 'Random' set. Set the value of parametername to a value taken from
## rand_distr, which should be a RandomDistribution object.
## """
## """
## Will be implemented in the future more efficiently for
## NativeRNGs.
## """
## rarr = numpy.array(rand_distr.next(n=self.size))
## rarr = rarr.reshape(self.dim[0:self.actual_ndim])
## self.tset(parametername, rarr)
def _call(self, methodname, arguments):
"""
Calls the method methodname(arguments) for every cell in the population.
e.g. p.call("set_background","0.1") if the cell class has a method
set_background().
"""
""" This works nicely for PCSIM for simulator specific cells,
because cells (SimObject classes) are directly wrapped in python """
for i in xrange(0, len(self)):
obj = simulator.net.object(self.pcsim_population[i])
if obj: getattr(obj, methodname)(arguments)
def _tcall(self, methodname, objarr):
"""
`Topographic' call. Calls the method methodname() for every cell in the
population. The argument to the method depends on the coordinates of the
cell. objarr is an array with the same dimensions as the Population.
e.g. p.tcall("memb_init", vinitArray) calls
p.cell[i][j].memb_init(vInitArray[i][j]) for all i, j.
"""
""" PCSIM: iteration at the python level and apply"""
for i in xrange(0, len(self)):
obj = simulator.net.object(self.pcsim_population[i])
if obj: getattr(obj, methodname)(objarr[i]) # note: the original referenced an undefined 'arguments'; per the docstring each cell should receive its own value from objarr
PopulationView = common.PopulationView
Assembly = common.Assembly
class Projection(common.Projection, WDManager):
"""
A container for all the connections of a given type (same synapse type and
plasticity mechanisms) between two populations, together with methods to set
parameters of those connections, including of plasticity mechanisms.
"""
nProj = 0
def __init__(self, presynaptic_population, postsynaptic_population,
method, source=None,
target=None, synapse_dynamics=None, label=None, rng=None):
"""
presynaptic_population and postsynaptic_population - Population objects.
source - string specifying which attribute of the presynaptic cell
signals action potentials
target - string specifying which synapse on the postsynaptic cell to
connect to
If source and/or target are not given, default values are used.
method - a Connector object, encapsulating the algorithm to use for
connecting the neurons.
synapse_dynamics - a `SynapseDynamics` object specifying which
synaptic plasticity mechanisms to use.
rng - specify an RNG object to be used by the Connector..
"""
"""
PCSIM implementation specific comments:
- source parameter does not have any meaning in context of PyPCSIM interface. Action potential
signals are predefined by the neuron model and each cell has only one source,
so there is no need to name a source since it is implicitly known.
- rng parameter is also currently not applicable. For connection making, only internal
random number generators can be used.
- The semantics of the target parameter is slightly changed:
If it is a string then it represents a pcsim synapse class.
If it is an integer then it represents which target(synapse) on the postsynaptic cell
to connect to.
It can be also a pcsim SimObjectFactory object which will be used for creation
of the synapse objects associated to the created connections.
"""
common.Projection.__init__(self, presynaptic_population, postsynaptic_population,
method, source, target,
synapse_dynamics, label, rng)
self.is_conductance = self.post.conductance_based
if isinstance(self.post, Assembly):
assert self.post._homogeneous_synapses
celltype = self.post.populations[0].celltype
else:
celltype = self.post.celltype
self.synapse_shape = "alpha" if "alpha" in celltype.__class__.__name__ else "exp"
### Determine connection decider
##decider, wiring_method, weight, delay = method.connect(self)
##
##weight = self.getWeight(weight)
##self.is_conductance = hasattr(self.post.pcsim_population.object(0),'ErevExc')
##
##if isinstance(weight, pyNN.random.RandomDistribution) or hasattr(weight, '__len__'):
## w = 1.
##else:
## w = self.convertWeight(weight, self.is_conductance)
##
##delay = self.getDelay(delay)
##if isinstance(delay, pyNN.random.RandomDistribution) or hasattr(delay, '__len__'):
## d = simulator.state.min_delay/1000.
##else:
## d = self.convertDelay(delay)
##
# handle synapse dynamics
if core.is_listlike(method.weights):
w = method.weights[0]
elif hasattr(method.weights, "next"): # random distribution
w = 0.0 # actual value used here shouldn't matter. Actual values will be set in the Connector.
elif isinstance(method.weights, basestring):
w = 0.0 # actual value used here shouldn't matter. Actual values will be set in the Connector.
elif hasattr(method.weights, 'func_name'):
w = 0.0 # actual value used here shouldn't matter. Actual values will be set in the Connector.
else:
w = method.weights
if core.is_listlike(method.delays):
d = min(method.delays)
elif hasattr(method.delays, "next"): # random distribution
d = get_min_delay() # actual value used here shouldn't matter. Actual values will be set in the Connector.
elif isinstance(method.delays, basestring):
d = get_min_delay() # actual value used here shouldn't matter. Actual values will be set in the Connector.
elif hasattr(method.delays, 'func_name'):
d = 0.0 # actual value used here shouldn't matter. Actual values will be set in the Connector.
else:
d = method.delays
plasticity_parameters = {}
if self.synapse_dynamics:
# choose the right model depending on whether we have conductance- or current-based synapses
if self.is_conductance:
possible_models = get_synapse_models("Cond")
else:
possible_models = get_synapse_models("Curr").union(get_synapse_models("CuBa"))
if self.synapse_shape == 'alpha':
possible_models = possible_models.intersection(get_synapse_models("Alpha"))
else:
possible_models = possible_models.intersection(get_synapse_models("Exp")).difference(get_synapse_models("DoubleExp"))
if not self.is_conductance and self.synapse_shape == "exp":
possible_models.add("StaticStdpSynapse")
possible_models.add("StaticSpikingSynapse")
possible_models.add("DynamicStdpSynapse")
possible_models.add("DynamicSpikingSynapse")
# we need to know the synaptic time constant, which is a property of the
# post-synaptic cell in PyNN. Here, we get it from the Population initial
# value, but this is a problem if tau_syn varies from cell to cell
if target in (None, 'excitatory'):
tau_syn = self.post.celltype.parameters['TauSynExc']
if self.is_conductance:
e_syn = self.post.celltype.parameters['ErevExc']
elif target == 'inhibitory':
tau_syn = self.post.celltype.parameters['TauSynInh']
if self.is_conductance:
e_syn = self.post.celltype.parameters['ErevInh']
else:
raise Exception("Currently, target must be one of 'excitatory', 'inhibitory' with dynamic synapses")
if self.is_conductance:
plasticity_parameters.update(Erev=e_syn)
weight_scale_factor = 1e-6
else:
weight_scale_factor = 1e-9
if self.synapse_dynamics.fast:
possible_models = possible_models.intersection(self.synapse_dynamics.fast.possible_models)
plasticity_parameters.update(self.synapse_dynamics.fast.parameters)
# perhaps need to ensure that STDP is turned off here, to be turned back on by the next block
else:
possible_models = possible_models.difference(dynamic_synapse_models) # imported from synapses module
if self.synapse_dynamics.slow:
possible_models = possible_models.intersection(self.synapse_dynamics.slow.possible_models)
plasticity_parameters.update(self.synapse_dynamics.slow.all_parameters)
dendritic_delay = self.synapse_dynamics.slow.dendritic_delay_fraction * d
transmission_delay = d - dendritic_delay
plasticity_parameters.update({'back_delay': 2*0.001*dendritic_delay, 'Winit': w*weight_scale_factor})
# hack to work around the limitations of the translation method
if self.is_conductance:
for name in self.synapse_dynamics.slow.weight_dependence.scales_with_weight:
plasticity_parameters[name] *= 1e3 # a scale factor of 1e-9 is always applied in the translation stage
else:
possible_models = possible_models.difference(stdp_synapse_models)
plasticity_parameters.update({'W': w*weight_scale_factor})
if len(possible_models) == 0:
raise errors.NoModelAvailableError("The synapse model requested is not available.")
synapse_type = getattr(pypcsim, list(possible_models)[0])
try:
self.syn_factory = synapse_type(delay=d, tau=tau_syn,
**plasticity_parameters)
except Exception, err:
err.args = ("%s\nActual arguments were: delay=%g, tau=%g, plasticity_parameters=%s" % (err.message, d, tau_syn, plasticity_parameters),) + err.args[1:]
raise
else:
if not target:
self.syn_factory = pypcsim.SimpleScalingSpikingSynapse(1, w, d)
elif isinstance(target, int):
self.syn_factory = pypcsim.SimpleScalingSpikingSynapse(target, w, d)
else:
if isinstance(target, str):
if target == 'excitatory':
self.syn_factory = pypcsim.SimpleScalingSpikingSynapse(1, w, d)
elif target == 'inhibitory':
self.syn_factory = pypcsim.SimpleScalingSpikingSynapse(2, w, d)
else:
target = eval(target)
self.syn_factory = target({})
else:
self.syn_factory = target
##self.pcsim_projection = pypcsim.ConnectionsProjection(self.pre.pcsim_population, self.post.pcsim_population,
## self.syn_factory, decider, wiring_method, collectIDs = True,
## collectPairs=True)
##
########## Should be removed and better implemented by using
### the fact that those random Distribution can be passed directly
### while the network is build, and not set after...
##if isinstance(weight, pyNN.random.RandomDistribution):
## self.randomizeWeights(weight)
##elif hasattr(weight, '__len__'):
## assert len(weight) == len(self), "Weight array does not have the same number of elements as the Projection %d != %d" % (len(weight),len(self))
## self.setWeights(weight)
##
##if isinstance(delay, pyNN.random.RandomDistribution):
## self.randomizeDelays(delay)
##elif hasattr(delay, '__len__'):
## assert len(delay) == len(self), "Weight array does not have the same number of elements as the Projection %d != %d" % (len(weight),len(self))
## self.setDelays(delay)
##self.synapse_type = self.syn_factory #target or 'excitatory'
self.synapse_type = target or 'excitatory'
self.connection_manager = simulator.ConnectionManager(self.syn_factory, parent=self)
self.connections = self.connection_manager
method.connect(self)
Projection.nProj += 1
# The commented-out code in this class has been left there as it may be
# useful when we start (re-)optimizing the implementation
##def __len__(self):
'<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861811481':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861811482':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861818218':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861811483':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861818212':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861811484':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861818210':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861818211':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861818216':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861818217':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861818214':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861818215':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861818814':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861818815':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'861818816':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')},
'861818817':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')},
'861818810':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861811486':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861818812':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9075\u4e49\u5e02')},
'861818813':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861811487':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861818818':{'en': 'Liupanshui, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u516d\u76d8\u6c34\u5e02')},
'861818819':{'en': 'Qianxinan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861816320':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861816321':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861816322':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861816323':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861816324':{'en': 'Heyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861816325':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861816326':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861816327':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861816328':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861816329':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'86182450':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'86182451':{'en': 'Harbin, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'86182452':{'en': 'Qiqihar, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9f50\u9f50\u54c8\u5c14\u5e02')},
'86182453':{'en': 'Mudanjiang, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7261\u4e39\u6c5f\u5e02')},
'86182454':{'en': 'Jiamusi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f73\u6728\u65af\u5e02')},
'86182455':{'en': 'Suihua, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7ee5\u5316\u5e02')},
'86182457':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'861827668':{'en': 'Hechi, Guangxi', 'zh': u('\u5e7f\u897f\u6cb3\u6c60\u5e02')},
'86182459':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'861829557':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'861829556':{'en': 'Guyuan, Ningxia', 'zh': u('\u5b81\u590f\u56fa\u539f\u5e02')},
'861829551':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'861829550':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'861829553':{'en': 'Wuzhong, Ningxia', 'zh': u('\u5b81\u590f\u5434\u5fe0\u5e02')},
'861829552':{'en': 'Shizuishan, Ningxia', 'zh': u('\u5b81\u590f\u77f3\u5634\u5c71\u5e02')},
'86181812':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u51c9\u5c71\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'86181819':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861826288':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861826289':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861826284':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861826285':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861826286':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861826287':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861826280':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861826281':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861826282':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861826283':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861812445':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861812444':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861812447':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861812446':{'en': 'Zhaoqing, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'861812441':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861812440':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861812443':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u63ed\u9633\u5e02')},
'861812442':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861821942':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861821943':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861821940':{'en': 'Meizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861821941':{'en': 'Meizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861812449':{'en': 'Huizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u60e0\u5dde\u5e02')},
'861812448':{'en': 'Heyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861821944':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861821945':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861810574':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861810573':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861810572':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'861821810':{'en': 'Shaoguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861810571':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861821811':{'en': 'Zhaoqing, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'861810570':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')},
'861821812':{'en': 'Heyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861823012':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')},
'861820945':{'en': 'Jinchang, Gansu', 'zh': u('\u7518\u8083\u7701\u91d1\u660c\u5e02')},
'861823010':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861821813':{'en': 'Meizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861823016':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861823017':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861819028':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8d44\u9633\u5e02')},
'861819029':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8d44\u9633\u5e02')},
'861819026':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u963f\u575d\u85cf\u65cf\u7f8c\u65cf\u81ea\u6cbb\u5dde')},
'861819027':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8d44\u9633\u5e02')},
'861819024':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861819025':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u963f\u575d\u85cf\u65cf\u7f8c\u65cf\u81ea\u6cbb\u5dde')},
'861819022':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7518\u5b5c\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861819023':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861819020':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u9042\u5b81\u5e02')},
'861819021':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7518\u5b5c\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861811233':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861811232':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861811231':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861811230':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861811237':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861811236':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861811235':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861811234':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861811239':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861811238':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861820943':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u767d\u94f6\u5e02')},
'861820942':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5b9a\u897f\u5e02')},
'861810973':{'en': 'Huangnan, Qinghai', 'zh': u('\u9752\u6d77\u7701\u9ec4\u5357\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861810972':{'en': 'Haidong, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u4e1c\u5730\u533a')},
'861810971':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861810970':{'en': 'Haibei, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u5317\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861825654':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861817257':{'en': 'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861810976':{'en': 'Yushu, Qinghai', 'zh': u('\u9752\u6d77\u7701\u7389\u6811\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861817255':{'en': 'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861810758':{'en': 'Zhaoqing, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'861817259':{'en': 'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861817258':{'en': 'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'86182768':{'en': 'Liuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')},
'861811941':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861811940':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861811943':{'en': 'Baiyin, Gansu', 'zh': u('\u7518\u8083\u7701\u767d\u94f6\u5e02')},
'861811942':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861811945':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861811944':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861811499':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811498':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811497':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811496':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861811495':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861811494':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861811493':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861811492':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861811491':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861811490':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861820097':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'86182765':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861820764':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'86182762':{'en': 'Liuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')},
'861820767':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86182763':{'en': 'Guilin, Guangxi', 'zh': u('\u5e7f\u897f\u6842\u6797\u5e02')},
'861810014':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861810015':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861810016':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861810017':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861810010':{'en': 'Tianjin', 'zh': u('\u5929\u6d25\u5e02')},
'861810011':{'en': 'Tianjin', 'zh': u('\u5929\u6d25\u5e02')},
'861810012':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861810013':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861816917':{'en': 'Guyuan, Ningxia', 'zh': u('\u5b81\u590f\u56fa\u539f\u5e02')},
'861816916':{'en': 'Wuzhong, Ningxia', 'zh': u('\u5b81\u590f\u5434\u5fe0\u5e02')},
'861810548':{'en': 'Tai\'an, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'861810549':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861810018':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861810019':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861816911':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'861816910':{'en': '<NAME>', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'861813895':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861813894':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861813897':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861813896':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861813891':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861813890':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861813893':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861813892':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861820762':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861813899':{'en': 'Jiangmen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861813898':{'en': 'Jiangmen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861814205':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861814204':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861814207':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861814206':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861814201':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861814200':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861814203':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861814202':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861814209':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861814208':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861809939':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861809938':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861809935':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861809934':{'en': 'Hami, Xinjiang', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861825744':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861821995':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u6b66\u5a01\u5e02')},
'861813099':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861813098':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861828099':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5e7f\u5b89\u5e02')},
'861821994':{'en': 'Qingyang, Gansu', 'zh': u('\u7518\u8083\u7701\u5e86\u9633\u5e02')},
'861813095':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861813094':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861813097':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861813096':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861813091':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861813090':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861813093':{'en': 'Hotan, Xinjiang', 'zh': u('\u65b0\u7586\u548c\u7530\u5730\u533a')},
'861813092':{'en': 'Hotan, Xinjiang', 'zh': u('\u65b0\u7586\u548c\u7530\u5730\u533a')},
'861821996':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5f20\u6396\u5e02')},
'861812328':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861812329':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861812324':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861812325':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861812326':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861812327':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861812320':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8fbe\u5dde\u5e02')},
'861812321':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861812322':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861812323':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861821993':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5e73\u51c9\u5e02')},
'861821992':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5b9a\u897f\u5e02')},
'86181319':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'86181648':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'86181649':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'86181312':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')},
'86181313':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'86181310':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'86181311':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'86181640':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'86181642':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'86181315':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861823845':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861823844':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861823847':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861823846':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861823841':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861823840':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861823843':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861823842':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861823849':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861823848':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861815499':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u548c\u7530\u5730\u533a')},
'861815498':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861815493':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861815492':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861815491':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861815490':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861815497':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861815496':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861815495':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861815494':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861813639':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861813638':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861813631':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861813630':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861813633':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861813632':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861813635':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861813634':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861813637':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861813636':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861809795':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861809794':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861809797':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861809796':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861809791':{'en': 'Kizilsu, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u5b5c\u52d2\u82cf\u67ef\u5c14\u514b\u5b5c\u81ea\u6cbb\u5dde')},
'861809790':{'en': 'Karamay, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u62c9\u739b\u4f9d\u5e02')},
'861809793':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861809792':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861809799':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861809798':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861815679':{'en': 'Bozhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u4eb3\u5dde\u5e02')},
'861815678':{'en': 'Bozhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u4eb3\u5dde\u5e02')},
'861815677':{'en': 'Bozhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u4eb3\u5dde\u5e02')},
'861815676':{'en': 'Bozhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u4eb3\u5dde\u5e02')},
'861815675':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861815674':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861815673':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861815672':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861815671':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861815670':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861816081':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'86182669':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')},
'86182668':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'86182849':{'en': 'Guangyuan, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5e7f\u5143\u5e02')},
'861816080':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'86182661':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'86182660':{'en': 'Zaozhuang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u67a3\u5e84\u5e02')},
'86182843':{'en': 'Leshan, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u4e50\u5c71\u5e02')},
'861816083':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
#!/usr/bin/python3
# PiPhone - A DIY Cellphone based on Raspberry Pi
# This must run as root (sudo python lapse.py) due to framebuffer, etc.
#
# http://www.adafruit.com/products/998 (Raspberry Pi Model B)
# http://www.adafruit.com/products/1601 (PiTFT Mini Kit)
#
# Prerequisite tutorials: aside from the basic Raspbian setup and PiTFT setup
# http://learn.adafruit.com/adafruit-pitft-28-inch-resistive-touchscreen-display-raspberry-pi
#
# piphone.py by <NAME> (<EMAIL>)
# based on cam.py by <NAME> / Paint Your Dragon for Adafruit Industries.
# BSD license, all text above must be included in any redistribution.
# python3 adaptation by <NAME> ( <EMAIL>)
import atexit
import pickle  # used by saveSettings/loadSettings below
import errno
import fnmatch
import io
import os
import pygame
import threading
from pygame.locals import *
from subprocess import call
from time import sleep
from datetime import datetime, timedelta
import serial
import sys
# UI classes ---------------------------------------------------------------
# Icon is a very simple bitmap class, just associates a name and a pygame
# image (PNG loaded from icons directory) for each.
# There isn't a globally-declared fixed list of Icons. Instead, the list
# is populated at runtime from the contents of the 'icons' directory.
class Icon:
def __init__(self, name):
self.name = name
try:
self.bitmap = pygame.image.load(iconPath + '/' + name + '.png')
except:
pass
# Button is a simple tappable screen region. Each has:
# - bounding rect ((X,Y,W,H) in pixels)
# - optional background color and/or Icon (or None), always centered
# - optional foreground Icon, always centered
# - optional single callback function
# - optional single value passed to callback
# Occasionally Buttons are used as a convenience for positioning Icons
# but the taps are ignored. Stacking order is important; when Buttons
# overlap, lowest/first Button in list takes precedence when processing
# input, and highest/last Button is drawn atop prior Button(s). This is
# used, for example, to center an Icon by creating a passive Button the
# width of the full screen, but with other buttons left or right that
# may take input precedence (e.g. the Effect labels & buttons).
# After Icons are loaded at runtime, a pass is made through the global
# buttons[] list to assign the Icon objects (from names) to each Button.
class Button:
def __init__(self, rect, **kwargs):
self.rect = rect # Bounds
self.color = None # Background fill color, if any
self.iconBg = None # Background Icon (atop color fill)
self.iconFg = None # Foreground Icon (atop background)
self.bg = None # Background Icon name
self.fg = None # Foreground Icon name
self.callback = None # Callback function
self.value = None # Value passed to callback
for key, value in kwargs.items():
if key == 'color':
self.color = value
elif key == 'bg':
self.bg = value
elif key == 'fg':
self.fg = value
elif key == 'cb':
self.callback = value
elif key == 'value':
self.value = value
def selected(self, pos):
x1 = self.rect[0]
y1 = self.rect[1]
x2 = x1 + self.rect[2] - 1
y2 = y1 + self.rect[3] - 1
if ((pos[0] >= x1) and (pos[0] <= x2) and
(pos[1] >= y1) and (pos[1] <= y2)):
if self.callback:
if self.value is None:
self.callback()
else:
self.callback(self.value)
return True
return False
def draw(self, screen):
if self.color:
screen.fill(self.color, self.rect)
if self.iconBg:
screen.blit(self.iconBg.bitmap,
(self.rect[0] + (self.rect[2] - self.iconBg.bitmap.get_width()) / 2,
self.rect[1] + (self.rect[3] - self.iconBg.bitmap.get_height()) / 2))
if self.iconFg:
screen.blit(self.iconFg.bitmap,
(self.rect[0] + (self.rect[2] - self.iconFg.bitmap.get_width()) / 2,
self.rect[1] + (self.rect[3] - self.iconFg.bitmap.get_height()) / 2))
def setBg(self, name):
if name is None:
self.iconBg = None
else:
for i in icons:
if name == i.name:
self.iconBg = i
break
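# Illustrative sketch (not part of the original script): how a Button, its callback
# and selected() fit together. The names _demo_cb and demo_btn are made up here.
# def _demo_cb(value):
#     print("tapped", value)
# demo_btn = Button((0, 0, 60, 60), color=(40, 40, 40), cb=_demo_cb, value=7)
# demo_btn.selected((10, 10))    # inside the rect -> calls _demo_cb(7), returns True
# demo_btn.selected((200, 200))  # outside the rect -> returns False, no callback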
# UI callbacks -------------------------------------------------------------
# These are defined before globals because they're referenced by items in
# the global buttons[] list.
def numericCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
global numberstring
global phonecall
if n == "+" and screenMode == 0:
numberstring = numberstring + str(n)
elif n < 10 and screenMode == 0:
numberstring = numberstring + str(n)
elif n == 10 and screenMode == 0:
numberstring = numberstring[:-1]
elif n == 12:
# if phonecall == 0:
if screenMode == 0:
if len(numberstring) > 0:
print(("Calling " + numberstring));
serialport.write("AT\r".encode())
response = serialport.readlines(None)
serialport.write("AT+QPCMV=1,2\r".encode())
response = serialport.readlines(None)
callstr = "ATD " + numberstring + ';\r'
serialport.write(callstr.encode())
response = serialport.readlines(None)
print(response)
# phonecall = 1
screenMode = 1
else:
print("Hanging Up...")
serialport.write("AT\r".encode())
response = serialport.readlines(None)
serialport.write("ATH\r".encode())
response = serialport.readlines(None)
serialport.write("AT+QPCMV=0\r".encode())
response = serialport.readlines(None)
print(response)
# phonecall = 0
screenMode = 0
exit(0)
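# (AT command notes, based on common Hayes/Quectel usage rather than anything in
# this repository: "ATD<number>;" places a voice call, "ATH" hangs it up, and
# "AT+QPCMV" appears to be the Quectel-specific setting that routes call audio
# to the modem's PCM/USB audio interface.)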
if len(numberstring) > 0 and numberstring.isdigit():
numeric = int(numberstring)
v[dict_idx] = numeric
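# Key map recap (derived from the branches above): on screen 0, digits and "+"
# append to the number and 10 acts as backspace; 12 either dials (screen 0) or
# hangs up (any other screen). Note that the star and hash buttons defined below
# are wired to value=0, so they currently behave like the "0" key.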
# Global stuff -------------------------------------------------------------
busy = False
threadExited = False
screenMode = 0 # Current screen mode; default = viewfinder
phonecall = 1
screenModePrior = -1 # Prior screen mode (for detecting changes)
iconPath = '/opt/setere/piphone/icons' # Subdirectory containing UI bitmaps (PNG format)
numeric = 0 # number from numeric keypad
numberstring = ""
motorRunning = 0
motorDirection = 0
returnScreen = 0
shutterpin = 17
motorpinA = 18
motorpinB = 27
motorpin = motorpinA
currentframe = 0
framecount = 100
settling_time = 0.2
shutter_length = 0.2
interval_delay = 0.2
dict_idx = "Interval"
v = {"Pulse": 100,
"Interval": 3000,
"Images": 150}
icons = [] # This list gets populated at startup
# buttons[] is a list of lists; each top-level list element corresponds
# to one screen mode (e.g. viewfinder, image playback, storage settings),
# and each element within those lists corresponds to one UI button.
# There's a little bit of repetition (e.g. prev/next buttons are
# declared for each settings screen, rather than a single reusable
# set); trying to reuse those few elements just made for an ugly
# tangle of code elsewhere.
buttons = [
# Screen 0 for numeric input
[Button((30, 0, 320, 60), bg='box'),
Button((30, 60, 60, 60), bg='1', cb=numericCallback, value=1),
Button((90, 60, 60, 60), bg='2', cb=numericCallback, value=2),
Button((150, 60, 60, 60), bg='3', cb=numericCallback, value=3),
Button((30, 110, 60, 60), bg='4', cb=numericCallback, value=4),
Button((90, 110, 60, 60), bg='5', cb=numericCallback, value=5),
Button((150, 110, 60, 60), bg='6', cb=numericCallback, value=6),
Button((30, 160, 60, 60), bg='7', cb=numericCallback, value=7),
Button((90, 160, 60, 60), bg='8', cb=numericCallback, value=8),
Button((150, 160, 60, 60), bg='9', cb=numericCallback, value=9),
Button((30, 210, 60, 60), bg='star', cb=numericCallback, value=0),
Button((90, 210, 60, 60), bg='0', cb=numericCallback, value=0),
Button((150, 210, 60, 60), bg='hash', cb=numericCallback, value=0),
Button((180, 260, 60, 60), bg='del2', cb=numericCallback, value=10),
Button((90, 260, 60, 60), bg='call', cb=numericCallback, value=12)],
# Screen 1 for numeric input
[Button((30, 0, 320, 60), bg='box'),
Button((90, 260, 60, 60), bg='hang', cb=numericCallback, value=12)]
]
# Assorted utility functions -----------------------------------------------
def saveSettings():
global v
try:
outfile = open('piphone.pkl', 'wb')
# Use a dictionary (rather than pickling 'raw' values) so
# the number & order of things can change without breaking.
pickle.dump(v, outfile)
outfile.close()
except:
pass
def loadSettings():
global v
try:
infile = open('piphone.pkl', 'rb')
v = pickle.load(infile)
infile.close()
except:
pass
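# Illustrative note: because settings are pickled as a dict, a newer build of the
# script can introduce keys without breaking an older piphone.pkl -- e.g. a
# hypothetical new setting could be defaulted after loading with
# v.setdefault("Brightness", 50).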
# Initialization -----------------------------------------------------------
# Init framebuffer/touchscreen environment variables
# os.putenv('SDL_VIDEODRIVER', 'fbcon')
# os.putenv('SDL_FBDEV' , '/dev/fb1')
# os.putenv('SDL_MOUSEDRV' , 'TSLIB')
# os.putenv('SDL_MOUSEDEV' , '/dev/input/touchscreen')
# Init pygame and screen
print("Initting...")
pygame.init()
print("Setting Mouse invisible...")
# pygame.mouse.set_visible(False)
print("Setting fullscreen...")
# modes = pygame.display.list_modes(16)
# screen = pygame.display.set_mode(modes[0], FULLSCREEN, 16)
screen_width = 240
screen_height = 320
screen = pygame.display.set_mode([screen_width, screen_height])
pygame.display.set_caption("Phone")
print("Loading Icons...")
# Load all icons at startup.
for file in os.listdir(iconPath):
if fnmatch.fnmatch(file, '*.png'):
icons.append(Icon(file.split('.')[0]))
# Assign Icons to Buttons, now that they're loaded
print("Assigning Buttons")
for s in buttons: # For each screenful of buttons...
for b in s: # For each button on screen...
for i in icons: # For each icon...
if b.bg == i.name: # Compare names; match?
b.iconBg = i # Assign Icon to Button
b.bg = None # Name no longer used; allow garbage collection
if b.fg == i.name:
b.iconFg = i
b.fg = None
print("Load Settings")
loadSettings() # Must come last; fiddles with Button/Icon states
print("loading background..")
img = pygame.image.load("icons/PiPhone.png")
if img is None or img.get_height() < 240: # Letterbox, clear background
screen.fill(0)
if img:
screen.blit(img,
((240 - img.get_width()) / 2,
(320 - img.get_height()) / 2))
pygame.display.update()
sleep(2)
print("Initialising Modem..")
serialport = serial.Serial("/dev/ttyUSB10", 115200, timeout=0.5)
serialport.write("AT\r".encode())
response = serialport.readlines(None)
serialport.write("ATE0\r".encode())
response = serialport.readlines(None)
serialport.write("AT\r".encode())
response = serialport.readlines(None)
print(response)
################parsing arguments
try:
print(sys.argv[1])
numbr = sys.argv[1].split(":")[1]
print(numbr)
except:
print("no args")
numbr = ""
numbr = numbr.replace('"', "")
numbr = numbr.replace("'", "")
numbr = numbr.replace("+7", "8")
numbr = numbr.replace(" ", "")
for ch in numbr:
numericCallback(int(ch))
# Main loop ----------------------------------------------------------------
print("mainloop..")
while (True):
# Process touchscreen input
while True:
screen_change = 0
for event in pygame.event.get():
if event.type == pygame.QUIT:
try:
print("Hanging Up...")
serialport.write("AT\r".encode())
response = serialport.readlines(None)
serialport.write("ATH\r".encode())
response = serialport.readlines(None)
serialport.write("AT+QPCMV=0\r".encode())
response = serialport.readlines(None)
print(response)
except:
print("...failed")
pygame.quit()
exit(0)
if (event.type == pygame.MOUSEBUTTONDOWN):
pos = pygame.mouse.get_pos()
for b in buttons[screenMode]:
if b.selected(pos): break
screen_change = | |
self.announce_to(self.channel, text, now)
def announce_to(self, dest, text, now=False):
"Announce to a specific destination (nick or channel)."
new_text = "%s%s%s:%s %s" % (LBLUE, self.__class__.__name__, WHITE, LGRAY, text)
self.send_to(dest, new_text, now)
def send(self, text, now=False):
"""
Send a message to the game's channel. Set now to bypass supybot's
queue, sending the message immediately.
"""
self.send_to(self.channel, text, now)
def send_to(self, dest, text, now=False):
"Send to a specific destination (nick or channel)."
method = self.irc.sendMsg if now else self.irc.queueMsg
method(ircmsgs.privmsg(dest, text))
def handle_message(self, msg):
"Handle incoming messages on the channel."
pass
class Boggle(BaseGame):
"The Boggle game implementation."
BOARD_SIZE = 4
FREQUENCY_TABLE = {
19: "E",
13: "T",
12: "AR",
11: "INO",
9: "S",
6: "D",
5: "CHL",
4: "FMPU",
3: "GY",
2: "W",
1: "BJKQVXZ",
}
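# How a table like this is typically consumed (an illustrative sketch only; the
# plugin's actual _generate_board is defined elsewhere): expand it into a
# weighted pool and draw BOARD_SIZE**2 letters at random, e.g.
# (requires the standard random module)
# pool = "".join(letters * count for count, letters in Boggle.FREQUENCY_TABLE.items())
# tiles = [random.choice(pool) for _ in range(Boggle.BOARD_SIZE ** 2)]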
POINT_VALUES = {
3: 1,
4: 1,
5: 2,
6: 3,
7: 5,
}
MAX_POINTS = 11 # 8 letters or longer
MESSAGES = {
"chat": "%s%%(nick)s%s says: %%(text)s" % (WHITE, LGRAY),
"joined": "%s%%(nick)s%s joined the game." % (WHITE, LGRAY),
"gameover": ("%s::: Time's Up :::%s Check %s%%(channel)s%s " + "for results.")
% (LRED, LGRAY, WHITE, LGRAY),
"players": "Current Players: %(players)s",
"ready": "%sGet Ready!" % WHITE,
"result": (
"%s%%(nick)s%s %%(verb)s %s%%(points)d%s " + "point%%(plural)s (%%(words)s)"
)
% (WHITE, LGRAY, LGREEN, LGRAY),
"startup": (
"Starting in %%(seconds)d seconds, "
+ 'use "%s%%(commandChar)sboggle%s" to play!'
)
% (WHITE, LGRAY),
"stopped": "Game stopped.",
"stopped2": "%s::: Game Stopped :::%s" % (LRED, LGRAY),
"warning": "%s%%(seconds)d%s seconds remaining..." % (LYELLOW, LGRAY),
"welcome1": (
"%s::: New Game :::%s (%s%%(difficulty)s%s: "
+ "%s%%(min_length)d%s letters or longer)"
)
% (LGREEN, LGRAY, WHITE, LGRAY, WHITE, LGRAY),
"welcome2": ("%s%%(nick)s%s, write your answers here, e.g.: " + "cat dog ...")
% (WHITE, LGRAY),
}
class State:
PREGAME = 0
READY = 1
ACTIVE = 2
DONE = 3
class PlayerResult:
"Represents result for a single player."
def __init__(self, player, unique=None, dup=None):
self.player = player
self.unique = unique if unique else set()
self.dup = dup if dup else set()
def __eq__(self, other):
return (self.get_score()) == (other.get_score())
def __ne__(self, other):
return (self.get_score()) != (other.get_score())
def __lt__(self, other):
return (self.get_score()) < (other.get_score())
def __le__(self, other):
return (self.get_score()) <= (other.get_score())
def __gt__(self, other):
return (self.get_score()) > (other.get_score())
def __ge__(self, other):
return (self.get_score()) >= (other.get_score())
def __repr__(self):
return "%s %s" % (self.get_score(), other.get_score())
def get_score(self):
score = 0
for word in self.unique:
score += Boggle.POINT_VALUES.get(len(word), Boggle.MAX_POINTS)
return score
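# Worked example (illustrative): with unique == {"cat", "breads", "elephants"}
# this returns POINT_VALUES[3] + POINT_VALUES[6] + MAX_POINTS = 1 + 3 + 11 = 15;
# duplicated words never score because only self.unique is summed.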
def render_words(self, longest_len=0):
"Return the words in this result, colorized appropriately."
words = sorted(list(self.unique) + list(self.dup))
words_text = ""
last_color = LGRAY
for word in words:
color = LCYAN if word in self.unique else GRAY
if color != last_color:
words_text += color
last_color = color
if len(word) == longest_len:
word += LYELLOW + "*"
last_color = LYELLOW
words_text += "%s " % word
if not words_text:
words_text = "%s-none-" % (GRAY)
words_text = words_text.strip() + LGRAY
return words_text
class Results:
"Represents results for all players."
def __init__(self):
self.player_results = {}
def add_player_words(self, player, words):
unique = set()
dup = set()
for word in words:
bad = False
for result in list(self.player_results.values()):
if word in result.unique:
result.unique.remove(word)
result.dup.add(word)
bad = True
elif word in result.dup:
bad = True
if bad:
dup.add(word)
else:
unique.add(word)
self.player_results[player] = Boggle.PlayerResult(player, unique, dup)
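# Worked example (illustrative): if Alice has already submitted {"cat", "dog"}
# and Bob then submits {"dog", "emu"}, "dog" is moved from Alice's unique set to
# her dup set and added to Bob's dup set, so neither player scores it; "cat"
# (Alice) and "emu" (Bob) stay unique and keep their point values.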
def sorted_results(self):
return sorted(list(self.player_results.values()), reverse=True)
def __init__(self, words, irc, channel, nick, delay, duration, difficulty):
# See tech note in the WordGames class.
self.parent = super(Boggle, self)
self.parent.__init__(words, irc, channel)
self.delay = delay
self.duration = duration
self.difficulty = difficulty
self.max_targets = get_max_targets(irc)
self._handle_difficulty()
self.board = self._generate_board()
self.event_name = "Boggle.%d" % id(self)
self.init_time = time.time()
self.longest_len = len(max(self.board.solutions, key=len))
self.starter = nick
self.state = Boggle.State.PREGAME
self.players = []
self.player_answers = {}
self.warnings = [30, 10, 5]
while self.warnings[0] >= duration:
self.warnings = self.warnings[1:]
def guess(self, nick, text):
# This can't happen right now, but it might be useful some day
if nick not in self.players:
self.join(nick)
# Pre-game messages are relayed as chatter (not treated as guesses)
if self.state < Boggle.State.ACTIVE:
self._broadcast("chat", self.players, nick=nick, text=text)
return
guesses = set(map(str.lower, text.split()))
accepted = [s for s in guesses if s in self.board.solutions]
rejected = [s for s in guesses if s not in self.board.solutions]
if len(accepted) > 3:
message = "%sGreat!%s" % (LGREEN, WHITE)
elif len(accepted) > 0:
message = "%sOk!" % WHITE
else:
message = "%sOops!%s" % (RED, LGRAY)
if accepted:
message += " You got: %s%s" % (" ".join(sorted(accepted)), LGRAY)
self.player_answers[nick].update(accepted)
if rejected:
message += " (not accepted: %s)" % " ".join(sorted(rejected))
self.send_to(nick, message)
def join(self, nick):
assert self.is_running()
assert self.state != Boggle.State.DONE
if nick not in self.players:
self._broadcast(
"welcome1",
[nick],
now=True,
difficulty=Difficulty.name(self.difficulty),
min_length=self.min_length,
)
self._broadcast("welcome2", [nick], now=True, nick=nick)
self._broadcast("joined", self.players, nick=nick)
self.players.append(nick)
self.player_answers[nick] = set()
if self.state == Boggle.State.ACTIVE:
self._display_board(nick)
else:
self._broadcast("players", [nick])
# Keep at least 5 seconds on the pre-game clock if someone joins
if self.state == Boggle.State.PREGAME:
time_left = self.init_time + self.delay - time.time()
if time_left < 5:
self.delay += 5 - time_left
self._schedule_next_event()
else:
self.send("%s: You have already joined the game." % nick)
def show(self):
# Not sure if this is really useful.
# if self.state == Boggle.State.ACTIVE:
# self._display_board(self.channel)
pass
def solve(self):
self.announce("Solutions: " + " ".join(sorted(self.board.solutions)))
def start(self):
self.parent.start()
self._broadcast("startup", [self.channel], True, seconds=self.delay)
self.join(self.starter)
self._schedule_next_event()
def stop(self, now=False):
self.parent.stop()
self.state = Boggle.State.DONE
try:
schedule.removeEvent(self.event_name)
except KeyError:
pass
if not now:
self._broadcast("stopped", [self.channel])
self._broadcast("stopped2", self.players)
def stats(self):
assert self.state == Boggle.State.DONE
points = 0
for word in self.board.solutions:
points += Boggle.POINT_VALUES.get(len(word), Boggle.MAX_POINTS)
longest_words = [w for w in self.board.solutions if len(w) == self.longest_len]
self.announce(
"There were %s%d%s possible words, with total point"
" value %s%d%s. The longest word%s: %s%s%s."
% (
WHITE,
len(self.board.solutions),
LGRAY,
LGREEN,
points,
LGRAY,
" was" if len(longest_words) == 1 else "s were",
LCYAN,
(LGRAY + ", " + LCYAN).join(longest_words),
LGRAY,
)
)
def _broadcast_text(self, text, recipients=None, now=False):
"""
Broadcast the given string message to the recipient list (default is
all players, not the game channel). Set now to bypass Supybot's queue
and send the message immediately.
"""
if recipients is None:
recipients = self.players
for i in range(0, len(recipients), self.max_targets):
targets = ",".join(recipients[i : i + self.max_targets])
self.announce_to(targets, text, now)
def _broadcast(self, name, recipients=None, now=False, **kwargs):
"""
Broadcast the message named by 'name' using the constants defined
in MESSAGES to the specified recipient list. If recipients is
unspecified, default is all players (game channel not included).
Keyword args should be provided for any format substitution in this
particular message.
"""
# Automatically provide some dictionary values
kwargs["channel"] = self.channel
kwargs["commandChar"] = str(conf.supybot.reply.whenAddressedBy.chars)[0]
kwargs["players"] = "%s%s%s" % (
WHITE,
(LGRAY + ", " + WHITE).join(self.players),
LGRAY,
)
if "points" in kwargs:
kwargs["plural"] = "" if kwargs["points"] == 1 else "s"
formatted = Boggle.MESSAGES[name] % kwargs
self._broadcast_text(formatted, recipients, now)
def _handle_difficulty(self):
self.min_length = {
Difficulty.EASY: 3,
Difficulty.MEDIUM: 4,
Difficulty.HARD: 5,
Difficulty.EVIL: 6,
}[self.difficulty]
def _get_ready(self):
self.state = Boggle.State.READY
self._broadcast("ready", now=True)
self._schedule_next_event()
def _begin_game(self):
self.state = Boggle.State.ACTIVE
self.start_time = time.time()
self.end_time = self.start_time + self.duration
self._display_board()
self._schedule_next_event()
def _schedule_next_event(self):
"""
(Re)schedules the next game event (start, time left warning, end)
as appropriate.
"""
# Unschedule any previous event
try:
schedule.removeEvent(self.event_name)
except KeyError:
pass
if self.state == Boggle.State.PREGAME:
# Schedule "get ready" message
schedule.addEvent(
self._get_ready, self.init_time + self.delay, self.event_name
)
elif self.state == Boggle.State.READY:
# Schedule game start
schedule.addEvent(
self._begin_game, self.init_time + self.delay + 3, self.event_name
)
elif self.state == Boggle.State.ACTIVE:
if self.warnings:
# Warn almost half a second early, in case there is a little
# latency before the event is triggered. (Otherwise a 30 second
# warning sometimes shows up as 29 seconds remaining.)
warn_time = self.end_time - self.warnings[0] - 0.499
schedule.addEvent(self._time_warning, warn_time, self.event_name)
self.warnings = self.warnings[1:]
else:
# Schedule game end
schedule.addEvent(self._end_game, self.end_time, self.event_name)
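# Timeline sketch (follows from the scheduling above): with delay=30 and
# duration=180, _get_ready fires about 30s after construction, _begin_game 3s
# later, the 30/10/5-second warnings fire just before those marks, and
# _end_game runs once end_time (start_time + duration) is reached.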
def _time_warning(self):
seconds = round(self.start_time + self.duration - time.time())
self._broadcast("warning", now=True, seconds=seconds)
self._schedule_next_event()
def _end_game(self):
self.gameover()
self.state = Boggle.State.DONE
# Compute results
results = Boggle.Results()
for player, answers in self.player_answers.items():
results.add_player_words(player, answers)
# Notify players
for result in list(results.player_results.values()):
self._broadcast("gameover", | |
metadata in metadata_gen:
wrs_polygon = metadata.get_wrs_polygon()
wrs_shape = shapely.wkb.loads(wrs_polygon)
unioned_beast = unioned_beast.union(wrs_shape)
self.assertTrue(unioned_beast.contains(area_shape))
class TestMetadata(unittest.TestCase):
def test_bounds(self):
row = ('LC80330352017072LGN00', '', 'LANDSAT_8', 'OLI_TIRS', '2017-03-13', '2017-03-13T17:38:14.0196140Z',
'PRE', 'N/A', 'L1T', 33, 35, 1.2, 37.10422, 34.96178, -106.85883, -104.24596, 1067621299,
'gs://gcp-public-data-landsat/LC08/PRE/033/035/LC80330352017072LGN00')
metadata = Metadata(row)
self.assertIsNotNone(metadata)
geom_wkb = metadata.get_wrs_polygon()
self.assertIsNotNone(geom_wkb)
bounding_polygon = box(*metadata.bounds)
wrs_polygon = shapely.wkb.loads(geom_wkb)
self.assertTrue(bounding_polygon.contains(wrs_polygon))
# polygon = loads(wkt)
# self.assertEqual(polygon.wkt, wkt)
# self.assertEqual(polygon.bounds, metadata.bounds)
# self.assertTrue(True)
def test_intersect(self):
self.assertTrue(True)
def test_epsg_codes(self):
self.assertEqual(32601, Metadata.get_utm_epsg_code(-180, 45))
self.assertEqual(32701, Metadata.get_utm_epsg_code(-180, -45))
self.assertEqual(32601, Metadata.get_utm_epsg_code(-174, 45))
self.assertEqual(32701, Metadata.get_utm_epsg_code(-174, -45))
self.assertEqual(32602, Metadata.get_utm_epsg_code(-173.99, 45))
self.assertEqual(32702, Metadata.get_utm_epsg_code(-173.99, -45))
self.assertEqual(32602, Metadata.get_utm_epsg_code(-168, 45))
self.assertEqual(32702, Metadata.get_utm_epsg_code(-168, -45))
self.assertEqual(32603, Metadata.get_utm_epsg_code(-167.99, 45))
self.assertEqual(32703, Metadata.get_utm_epsg_code(-167.99, -45))
self.assertEqual(32603, Metadata.get_utm_epsg_code(-162, 45))
self.assertEqual(32703, Metadata.get_utm_epsg_code(-162, -45))
self.assertEqual(32660, Metadata.get_utm_epsg_code(180, 45))
self.assertEqual(32760, Metadata.get_utm_epsg_code(180, -45))
self.assertEqual(32660, Metadata.get_utm_epsg_code(174.00001, 45))
self.assertEqual(32760, Metadata.get_utm_epsg_code(174.00001, -45))
self.assertEqual(32659, Metadata.get_utm_epsg_code(174, 45))
self.assertEqual(32759, Metadata.get_utm_epsg_code(174, -45))
self.assertEqual(32659, Metadata.get_utm_epsg_code(168.00001, 45))
self.assertEqual(32759, Metadata.get_utm_epsg_code(168.00001, -45))
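# A minimal formula consistent with every assertion above (an illustrative guess,
# not the library's actual implementation): 6-degree UTM zones numbered 1..60
# east of 180W, EPSG 326xx in the north and 327xx in the south, with a longitude
# sitting exactly on a zone edge kept in the lower-numbered zone. Treating
# lat >= 0 as "north" is an assumption here:
# zone = max(1, math.ceil((lon + 180.0) / 6.0))
# epsg = (32600 if lat >= 0 else 32700) + zone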
def test_utm_epsg(self):
row = ('LC80330352017072LGN00', '', 'LANDSAT_8', 'OLI_TIRS', '2017-03-13', '2017-03-13T17:38:14.0196140Z',
'PRE', 'N/A', 'L1T', 33, 35, 1.2, 37.10422, 34.96178, -106.85883, -104.24596, 1067621299,
'gs://gcp-public-data-landsat/LC08/PRE/033/035/LC80330352017072LGN00')
metadata = Metadata(row)
self.assertIsNotNone(metadata)
self.assertEqual(32613, metadata.utm_epsg_code)
def test_doy(self):
row = ('LC80330352017072LGN00_FAKED', '', 'LANDSAT_8', 'OLI_TIRS', '2017-11-04', '2017-11-04T17:38:14.0196140Z',
'PRE', 'N/A', 'L1T', 33, 35, 1.2, 37.10422, 34.96178, -106.85883, -104.24596, 1067621299,
'gs://gcp-public-data-landsat/LC08/PRE/033/035/LC80330352017072LGN00_FAKE')
metadata = Metadata(row)
self.assertIsNotNone(metadata)
self.assertEqual(308, metadata.doy)
class TestBandMap(unittest.TestCase):
def test_landsat_5(self):
band_map = BandMap(SpacecraftID.LANDSAT_5)
self.assertEqual(band_map.get_number(Band.BLUE), 1)
self.assertEqual(band_map.get_number(Band.SWIR2), 7)
self.assertEqual(band_map.get_number(Band.THERMAL), 6)
self.assertEqual(band_map.get_band_enum(1), Band.BLUE)
self.assertEqual(band_map.get_band_enum(7), Band.SWIR2)
self.assertEqual(band_map.get_band_enum(6), Band.THERMAL)
for idx, val in enumerate([Band.BLUE, Band.GREEN, Band.RED, Band.NIR, Band.SWIR1]):
self.assertEqual(band_map.get_band_enum(idx + 1), val)
self.assertEqual(band_map.get_number(val), idx + 1)
self.assertEqual(band_map.get_resolution(val), 30.0)
def test_landsat_5_exceptions(self):
band_map_2 = BandMap(SpacecraftID.LANDSAT_7)
self.assertEqual(band_map_2.get_number(Band.PANCHROMATIC), 8)
self.assertEqual(band_map_2.get_band_enum(8), Band.PANCHROMATIC)
self.assertEqual(band_map_2.get_resolution(Band.PANCHROMATIC), 15.0)
band_map = BandMap(SpacecraftID.LANDSAT_5)
self.assertRaises(KeyError, lambda: band_map.get_number(Band.CIRRUS))
self.assertRaises(KeyError, lambda: band_map.get_number(Band.PANCHROMATIC))
self.assertRaises(KeyError, lambda: band_map.get_band_enum(8))
def test_landsat_7(self):
band_map = BandMap(SpacecraftID.LANDSAT_7)
self.assertEqual(band_map.get_number(Band.BLUE), 1)
self.assertEqual(band_map.get_number(Band.SWIR1), 5)
self.assertEqual(band_map.get_number(Band.THERMAL), 6)
self.assertEqual(band_map.get_number(Band.PANCHROMATIC), 8)
self.assertEqual(band_map.get_band_enum(8), Band.PANCHROMATIC)
self.assertEqual(band_map.get_resolution(Band.PANCHROMATIC), 15.0)
self.assertEqual(band_map.get_band_enum(1), Band.BLUE)
self.assertEqual(band_map.get_band_enum(5), Band.SWIR1)
self.assertEqual(band_map.get_band_enum(6), Band.THERMAL)
self.assertEqual(band_map.get_number(Band.SWIR2), 7)
self.assertEqual(band_map.get_band_enum(7), Band.SWIR2)
for idx, val in enumerate([Band.BLUE, Band.GREEN, Band.RED, Band.NIR, Band.SWIR1]):
self.assertEqual(band_map.get_band_enum(idx + 1), val)
self.assertEqual(band_map.get_number(val), idx + 1)
self.assertEqual(band_map.get_resolution(val), 30.0)
def test_landsat_7_exceptions(self):
band_map = BandMap(SpacecraftID.LANDSAT_7)
self.assertRaises(KeyError, lambda: band_map.get_number(Band.CIRRUS))
self.assertRaises(KeyError, lambda: band_map.get_number(Band.TIRS1))
self.assertRaises(KeyError, lambda: band_map.get_band_enum(9))
def test_landsat_8(self):
band_map = BandMap(SpacecraftID.LANDSAT_8)
self.assertEqual(band_map.get_number(Band.ULTRA_BLUE), 1)
self.assertEqual(band_map.get_number(Band.BLUE), 2)
self.assertEqual(band_map.get_number(Band.SWIR1), 6)
self.assertEqual(band_map.get_band_enum(2), Band.BLUE)
self.assertEqual(band_map.get_band_enum(6), Band.SWIR1)
self.assertEqual(band_map.get_number(Band.SWIR2), 7)
self.assertEqual(band_map.get_band_enum(7), Band.SWIR2)
for idx, val in enumerate([Band.BLUE, Band.GREEN, Band.RED, Band.NIR, Band.SWIR1]):
self.assertEqual(band_map.get_band_enum(idx + 2), val)
self.assertEqual(band_map.get_number(val), idx + 2)
self.assertEqual(band_map.get_number(Band.CIRRUS), 9)
self.assertEqual(band_map.get_number(Band.TIRS1), 10)
self.assertEqual(band_map.get_number(Band.TIRS2), 11)
self.assertEqual(band_map.get_resolution(Band.PANCHROMATIC), 15.0)
self.assertEqual(band_map.get_band_enum(9), Band.CIRRUS)
self.assertEqual(band_map.get_band_enum(10), Band.TIRS1)
self.assertEqual(band_map.get_band_enum(11), Band.TIRS2)
def test_landsat_8_exceptions(self):
band_map = BandMap(SpacecraftID.LANDSAT_8)
self.assertRaises(KeyError, lambda: band_map.get_number(Band.THERMAL))
self.assertRaises(KeyError, lambda: band_map.get_band_enum(12))
class TestDataType(unittest.TestCase):
def test_bitwise(self):
a = DataType.UINT32
b = DataType.UINT16
class TestWRSGeometries(unittest.TestCase):
test_cases = [[15.74326, 26.98611, 1, 1, 1, 0, 13001, 13001, 13, 1, 'D', 1, 2233],
[2.74362, 6.65058, 942, 942, 1, 0, 61198, 61198, 61, 198, 'A', 1, 3174],
[13.37321, 24.20422, 2225, 2225, 1, 0, 125241, 125241, 125, 241, 'A', 2, 4209],
[3.58953, 7.7865, 1021, 1021, 1, 0, 75029, 75029, 75, 29, 'D', 3, 10445],
[4.2424, 8.69453, 891, 891, 1, 0, 64147, 64147, 64, 147, 'A', 6, 21227],
[16.81754, 27.20801, 3720, 3720, 1, 0, 223248, 223248, 223, 248, 'D', 16, 56296]]
wrs_geometries = WRSGeometries()
def test_geometry(self):
for test_case in self.test_cases:
geom_wkb = self.wrs_geometries.get_wrs_geometry(test_case[8], test_case[9])
geom_expected_area = test_case[0]
self.assertIsNotNone(geom_wkb)
s = shapely.wkb.loads(geom_wkb)
self.assertAlmostEqual(geom_expected_area, s.area, 5)
def test_belgium(self):
r = requests.get("https://raw.githubusercontent.com/johan/world.geo.json/master/countries/BEL.geo.json")
area_geom = r.json()
area_shape = shapely.geometry.shape(area_geom['features'][0]['geometry'])
bounds_set = None
while bounds_set is None:
bounds_set = self.wrs_geometries.get_path_row((2.513573, 49.529484, 6.156658, 51.475024))
for path_row in bounds_set:
geom_wkb = self.wrs_geometries.get_wrs_geometry(path_row[0], path_row[1])
s = shapely.wkb.loads(geom_wkb)
b_intersect = s.envelope.intersects(area_shape.envelope)
self.assertTrue(b_intersect)
# filehandler = open("/.epl/wrs_geom.obj", "wb")
# import pickle
# pickle.dump(self.wrs_geometries, filehandler)
# filehandler.close()
def test_bounds_search(self):
for idx, test_case in enumerate(self.test_cases):
geom_wkb = self.wrs_geometries.get_wrs_geometry(test_case[8], test_case[9])
original_shape = shapely.wkb.loads(geom_wkb)
result = self.wrs_geometries.get_path_row(original_shape.bounds)
path_pair = result.pop()
while path_pair is not None:
geom_wkb = self.wrs_geometries.get_wrs_geometry(path_pair[0], path_pair[1])
s = shapely.wkb.loads(geom_wkb)
b_intersect = s.envelope.intersects(original_shape.envelope)
if not b_intersect:
print("Test case {0}\n original bounds: {1}\nnon-intersecting bounds{2}\n".format(idx,
original_shape.bounds,
s.bounds))
self.assertTrue(b_intersect, "Test case {0}\n original bounds: {1}\nnon-intersecting bounds{2}\n"
.format(idx, original_shape.bounds, s.bounds))
if result:
path_pair = result.pop()
else:
break
class TestLandsat(unittest.TestCase):
base_mount_path = '/imagery'
metadata_service = None
metadata_set = []
r = requests.get("https://raw.githubusercontent.com/johan/world.geo.json/master/countries/USA/NM/Taos.geo.json")
taos_geom = r.json()
taos_shape = shapely.geometry.shape(taos_geom['features'][0]['geometry'])
def setUp(self):
d_start = date(2017, 3, 12) # 2017-03-12
d_end = date(2017, 3, 19) # 2017-03-20, epl api is inclusive
self.metadata_service = MetadataService()
landsat_filters = LandsatQueryFilters()
landsat_filters.collection_number.set_value("PRE")
landsat_filters.acquired.set_range(d_start, True, d_end, True)
landsat_filters.aoi.set_bounds(*self.taos_shape.bounds)
metadata_rows = self.metadata_service.search(
SpacecraftID.LANDSAT_8,
limit=10,
data_filters=landsat_filters)
# mounted directory in docker container
base_mount_path = '/imagery'
for row in metadata_rows:
self.metadata_set.append(row)
# TODO test PRE rejection
# TODO test date range rejection
# TODO test Satellite Rejection
def test_vrt_not_pre(self):
d_start = date(2017, 6, 24)
d_end = date(2017, 9, 24)
bounding_box = (-115.927734375, 34.52466147177172, -78.31054687499999, 44.84029065139799)
# sql_filters = ['collection_number!="PRE"']
landsat_filter = LandsatQueryFilters()
landsat_filter.collection_number.set_exclude_value("PRE")
landsat_filter.acquired.set_range(d_start, True, d_end, True)
landsat_filter.aoi.set_bounds(*bounding_box)
rows = self.metadata_service.search(SpacecraftID.LANDSAT_8,
limit=1,
data_filters=landsat_filter)
rows = list(rows)
metadata = rows[0]
landsat = Landsat(metadata)
self.assertIsNotNone(landsat)
vrt = landsat.get_vrt([4, 3, 2])
self.assertIsNotNone(vrt)
dataset = landsat.get_dataset([4, 3, 2], DataType.UINT16)
self.assertIsNotNone(dataset)
def test_pixel_function_vrt_1(self):
utah_box = (-112.66342163085938, 37.738141282210385, -111.79824829101562, 38.44821130413263)
d_start = date(2016, 7, 20)
d_end = date(2016, 7, 28)
landsat_filter = LandsatQueryFilters()
landsat_filter.collection_number.set_value("PRE")
landsat_filter.cloud_cover.set_range(end=5, end_inclusive=True) #landsat_filter.cloud_cover.set_range_end(5, True)
landsat_filter.acquired.set_range(d_start, True, d_end, True)
landsat_filter.aoi.set_bounds(*utah_box)
rows = self.metadata_service.search(SpacecraftID.LANDSAT_8,
limit=10,
data_filters=landsat_filter)
rows = list(rows)
self.assertEqual(0, len(rows))
d_end = date(2016, 8, 28)
landsat_filter = LandsatQueryFilters()
landsat_filter.collection_number.set_value("PRE")
landsat_filter.cloud_cover.set_range(end=5, end_inclusive=False) #landsat_filter.cloud_cover.set_range_end(5, False)
landsat_filter.acquired.set_range(end=d_end, end_inclusive=True) #landsat_filter.acquired.set_range_end(d_end, True)
landsat_filter.acquired.sort_by(epl_imagery_pb2.DESCENDING)
landsat_filter.aoi.set_bounds(*utah_box)
rows = self.metadata_service.search(SpacecraftID.LANDSAT_8,
limit=10,
data_filters=landsat_filter)
rows = list(rows)
self.assertEqual(len(rows), 10)
# metadata_row = ['LC80390332016208LGN00', '', 'LANDSAT_8', 'OLI_TIRS', '2016-07-26',
# '2016-07-26T18:14:46.9465460Z', 'PRE', 'N/A', 'L1T', 39, 33, 1.69,
# 39.96962, 37.81744, -115.27267, -112.56732, 1070517542,
# 'gs://gcp-public-data-landsat/LC08/PRE/039/033/LC80390332016208LGN00']
metadata = rows[0]
# GDAL helper functions for generating VRT
landsat = Landsat([metadata])
# get a numpy.ndarray from bands for specified imagery
band_numbers = [4, 3, 2]
scale_params = [[0.0, 65535], [0.0, 65535], [0.0, 65535]]
nda = landsat.fetch_imagery_array(band_numbers, scale_params)
self.assertEqual(nda.shape, (3876, 3806, 3))
def test_band_enum(self):
self.assertTrue(True)
d_start = date(2016, 7, 20)
d_end = date(2016, 7, 28)
landsat_filter = LandsatQueryFilters()
landsat_filter.scene_id.set_value("LC80390332016208LGN00")
landsat_filter.acquired.set_range(d_start, True, d_end, True)
rows = self.metadata_service.search(SpacecraftID.LANDSAT_8,
limit=1,
data_filters=landsat_filter)
rows = list(rows)
metadata = rows[0]
landsat = Landsat(metadata)
scale_params = [[0.0, 65535], [0.0, 65535], [0.0, 65535]]
# nda = landsat.__get_ndarray(band_numbers, metadata, scale_params)
nda = landsat.fetch_imagery_array([Band.RED, Band.GREEN, Band.BLUE], scale_params, spatial_resolution_m=240)
self.assertIsNotNone(nda)
nda2 = landsat.fetch_imagery_array([4, 3, 2], scale_params, spatial_resolution_m=240)
np.testing.assert_almost_equal(nda, nda2)
# 'scene_id': 'LC80390332016208LGN00'
def test_vrt_extent(self):
# GDAL helper functions for generating VRT
landsat = Landsat(self.metadata_set[0])
# get a numpy.ndarray from bands for specified imagery
band_numbers = [Band.RED, Band.GREEN, Band.BLUE]
scale_params = [[0.0, 65535], [0.0, 65535], [0.0, 65535]]
vrt = landsat.get_vrt(band_numbers, envelope_boundary=self.taos_shape.bounds)
self.assertIsNotNone(vrt)
def test_cutline(self):
# GDAL helper functions for generating VRT
landsat = Landsat(self.metadata_set[0])
# get a numpy.ndarray from bands for specified imagery
band_numbers = [Band.RED, Band.GREEN, Band.BLUE]
scale_params = [[0.0, 65535], [0.0, 65535], [0.0, 65535]]
nda = landsat.fetch_imagery_array(band_numbers, scale_params, self.taos_shape.wkb, spatial_resolution_m=480)
self.assertIsNotNone(nda)
# TODO needs shape test
def test_mosaic(self):
# GDAL helper functions for generating VRT
landsat = Landsat(self.metadata_set)
# get a numpy.ndarray from bands for specified imagery
band_numbers = [Band.RED, Band.GREEN, Band.BLUE]
scale_params = [[0.0, 65535], [0.0, 65535], [0.0, 65535]]
nda = landsat.fetch_imagery_array(band_numbers, scale_params, envelope_boundary=self.taos_shape.bounds)
self.assertIsNotNone(nda)
self.assertEqual((1804, 1295, 3), nda.shape)
# TODO needs shape test
def test_mosaic_cutline(self):
# GDAL helper functions for generating VRT
landsat = Landsat(self.metadata_set)
# get a numpy.ndarray from bands for specified imagery
# 'nir', 'swir1', 'swir2'
band_numbers = [Band.NIR, Band.SWIR1, Band.SWIR2]
scaleParams = [[0.0, 40000.0], [0.0, 40000.0], [0.0, 40000.0]]
nda = landsat.fetch_imagery_array(band_numbers, scaleParams, polygon_boundary_wkb=self.taos_shape.wkb)
self.assertIsNotNone(nda)
self.assertEqual((1804, 1295, 3), nda.shape)
def test_polygon_wkb_metadata(self):
d_start = date(2017, 3, 12) # 2017-03-12
d_end = date(2017, 3, 19) # 2017-03-20, epl api is inclusive
self.metadata_service = MetadataService()
landsat_filters = LandsatQueryFilters()
landsat_filters.collection_number.set_value("PRE")
landsat_filters.acquired.set_range(d_start, True, d_end, True)
landsat_filters.aoi.set_geometry(self.taos_shape.wkb)
# landsat_filters.geometry_bag.geometry_binaries.append(self.taos_shape.wkb)
metadata_rows = self.metadata_service.search(
SpacecraftID.LANDSAT_8,
limit=10,
data_filters=landsat_filters)
metadata_set = []
for row in metadata_rows:
metadata_set.append(row)
landsat = Landsat(metadata_set)
band_numbers = [Band.NIR, Band.SWIR1, Band.SWIR2]
scaleParams = [[0.0, 40000.0], [0.0, 40000.0], [0.0, 40000.0]]
nda = landsat.fetch_imagery_array(band_numbers, scaleParams, polygon_boundary_wkb=self.taos_shape.wkb)
self.assertIsNotNone(nda)
self.assertEqual((1804, 1295, 3), nda.shape)
def test_mosaic_mem_error(self):
landsat = Landsat(self.metadata_set)
# get a numpy.ndarray from bands for specified imagery
band_numbers = [Band.RED, Band.GREEN, Band.BLUE]
scaleParams = [[0.0, 40000], [0.0, 40000], [0.0, 40000]]
nda = landsat.fetch_imagery_array(band_numbers, scaleParams, envelope_boundary=self.taos_shape.bounds)
self.assertIsNotNone(nda)
# GDAL helper functions for generating VRT
landsat = Landsat(self.metadata_set)
self.assertEqual((1804, 1295, 3), nda.shape)
# get a numpy.ndarray from bands for specified imagery
# 'nir', 'swir1', 'swir2'
band_numbers = [Band.NIR, Band.SWIR1, Band.SWIR2]
scaleParams = [[0.0, 40000.0], [0.0, 40000.0], [0.0, 40000.0]]
nda = landsat.fetch_imagery_array(band_numbers, scaleParams, | |
#! /usr/bin/env python
# -*- coding:utf8 -*-
#
# pw_classes.py
#
# This file is part of pyplanes, a software distributed under the MIT license.
# For any question, please contact one of the authors cited below.
#
# Copyright (c) 2020
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
import numpy as np
import numpy.linalg as LA
import matplotlib.pyplot as plt
from mediapack import from_yaml
from mediapack import Air, PEM, EqFluidJCA
from pyPLANES.utils.io import initialisation_out_files_plain
from pyPLANES.core.calculus import PwCalculus
from pyPLANES.core.multilayer import MultiLayer
from pyPLANES.pw.pw_layers import FluidLayer
from pyPLANES.pw.pw_interfaces import FluidFluidInterface, RigidBacking
Air = Air()
# def initialise_PW_solver(L, b):
# nb_PW = 0
# dofs = []
# for _layer in L:
# if _layer.medium.MODEL == "fluid":
# dofs.append(nb_PW+np.arange(2))
# nb_PW += 2
# elif _layer.medium.MODEL == "pem":
# dofs.append(nb_PW+np.arange(6))
# nb_PW += 6
# elif _layer.medium.MODEL == "elastic":
# dofs.append(nb_PW+np.arange(4))
# nb_PW += 4
# interface = []
# for i_l, _layer in enumerate(L[:-1]):
# interface.append((L[i_l].medium.MODEL, L[i_l+1].medium.MODEL))
# return nb_PW, interface, dofs
class PwProblem(PwCalculus, MultiLayer):
"""
Plane Wave Problem
"""
def __init__(self, **kwargs):
PwCalculus.__init__(self, **kwargs)
        termination = kwargs.get("termination", "rigid")
        self.method = kwargs.get("method", "global")
MultiLayer.__init__(self, **kwargs)
self.kx, self.ky, self.k = None, None, None
self.shift_plot = kwargs.get("shift_pw", 0.)
self.plot = kwargs.get("plot_results", [False]*6)
self.result = {}
self.outfiles_directory = False
if self.method == "global":
self.layers.insert(0,FluidLayer(Air,1.e-2))
if self.layers[1].medium.MEDIUM_TYPE == "fluid":
self.interfaces.append(FluidFluidInterface(self.layers[0],self.layers[1]))
self.nb_PW = 0
for _layer in self.layers:
if _layer.medium.MODEL == "fluid":
_layer.dofs = self.nb_PW+np.arange(2)
self.nb_PW += 2
elif _layer.medium.MODEL == "pem":
_layer.dofs = self.nb_PW+np.arange(6)
self.nb_PW += 6
elif _layer.medium.MODEL == "elastic":
_layer.dofs = self.nb_PW+np.arange(4)
self.nb_PW += 4
def update_frequency(self, f):
PwCalculus.update_frequency(self, f)
MultiLayer.update_frequency(self, f, self.k, self.kx)
def create_linear_system(self, f):
self.A = np.zeros((self.nb_PW-1, self.nb_PW), dtype=complex)
i_eq = 0
# Loop on the interfaces
for _int in self.interfaces:
if self.method == "global":
i_eq = _int.update_M_global(self.A, i_eq)
# for i_inter, _inter in enumerate(self.interfaces):
# if _inter[0] == "fluid":
# if _inter[1] == "fluid":
# i_eq = self.interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_fluid_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_fluid_elastic(i_eq, i_inter, Layers, dofs, M)
# elif _inter[0] == "pem":
# if _inter[1] == "fluid":
# i_eq = self.interface_pem_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_pem_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_pem_elastic(i_eq, i_inter, Layers, dofs, M)
# elif _inter[0] == "elastic":
# if _inter[1] == "fluid":
# i_eq = self.interface_elastic_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_elastic_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_elastic_elastic(i_eq, i_inter, Layers, dofs, M)
# if self.backing == backing.rigid:
# if Layers[-1].medium.MODEL == "fluid":
# i_eq = self.interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1] )
# elif Layers[-1].medium.MODEL == "pem":
# i_eq = self.interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1])
# elif Layers[-1].medium.MODEL == "elastic":
# i_eq = self.interface_elastic_rigid(M, i_eq, Layers[-1], dofs[-1])
# elif self.backing == "transmission":
# i_eq = self.semi_infinite_medium(M, i_eq, Layers[-1], dofs[-1] )
        self.F = -self.A[:, 0]*np.exp(1j*self.ky*self.layers[0].d)  # incident-wave column moved to the right-hand side (hence the minus sign); the exponential is the phase shift across the added air layer
self.A = np.delete(self.A, 0, axis=1)
# print(self.A)
X = LA.solve(self.A, self.F)
# print(X)
# R_pyPLANES_PW = X[0]
# if self.backing == "transmission":
# T_pyPLANES_PW = X[-2]
# else:
# T_pyPLANES_PW = 0.
# X = np.delete(X, 0)
# del(dofs[0])
# for i, _ld in enumerate(dofs):
# dofs[i] -= 2
# if self.plot:
# self.plot_sol_PW(X, dofs)
# out["R"] = R_pyPLANES_PW
# out["T"] = T_pyPLANES_PW
# return out
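# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). Only the
# "termination" and "method" keywords are read by PwProblem above; the layer
# list convention ("ml" entries of material name and thickness) follows the
# legacy Solver_PW kept in comments below, and the remaining values are
# assumptions.
#
# problem = PwProblem(ml=[("melamine", 5.e-2)], termination="rigid",
#                     method="global")
# problem.update_frequency(1000.)
# problem.create_linear_system(1000.)
# ----------------------------------------------------------------------------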
# class Solver_PW(PwCalculus):
# def __init__(self, **kwargs):
# PwCalculus.__init__(self, **kwargs)
# ml = kwargs.get("ml")
# termination = kwargs.get("termination")
# self.layers = []
# for _l in ml:
# if _l[0] == "Air":
# mat = Air
# else:
# mat = from_yaml(_l[0]+".yaml")
# d = _l[1]
# self.layers.append(Layer(mat,d))
# if termination in ["trans", "transmission","Transmission"]:
# self.backing = "Transmission"
# else:
# self.backing = backing.rigid
# self.kx, self.ky, self.k = None, None, None
# self.shift_plot = kwargs.get("shift_pw", 0.)
# self.plot = kwargs.get("plot_results", [False]*6)
# self.result = {}
# self.outfiles_directory = False
# initialisation_out_files_plain(self)
# def write_out_files(self, out):
# self.out_file.write("{:.12e}\t".format(self.current_frequency))
# abs = 1-np.abs(out["R"])**2
# self.out_file.write("{:.12e}\t".format(abs))
# self.out_file.write("\n")
# def interface_fluid_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_rigid(self, M, ieq, L, d):
# SV, k_y = fluid_SV(self.kx, self.k, L.medium.K)
# M[ieq, d[0]] = SV[0, 0]*np.exp(-1j*k_y*L.thickness)
# M[ieq, d[1]] = SV[0, 1]
# ieq += 1
# return ieq
# def semi_infinite_medium(self, M, ieq, L, d):
# M[ieq, d[1]] = 1.
# ieq += 1
# return ieq
# def interface_pem_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium, self.kx)
# for _i in range(6):
# M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[_i, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[_i, 3]
# M[ieq, d[iinter+0][4]] = SV_1[_i, 4]
# M[ieq, d[iinter+0][5]] = SV_1[_i, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[_i, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[_i, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[_i, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[_i, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[_i, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[2, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[4, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[4, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[4, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[4, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[4, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[4, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+1][0]] = SV_2[3, 0]
# M[ieq, d[iinter+1][1]] = SV_2[3, 1]
# M[ieq, d[iinter+1][2]] = SV_2[3, 2]
# M[ieq, d[iinter+1][3]] = SV_2[3, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[3, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[3, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_elastic_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = -SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[0, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[0, 3]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = SV_2[1, 2]
# M[ieq, d[iinter+1][3]] = SV_2[1, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[1, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[1, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = SV_2[2, 2]
# M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[2, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[2, 3]
# M[ieq, d[iinter+1][0]] = (SV_2[3, 0]-SV_2[4, 0])
# M[ieq, d[iinter+1][1]] = (SV_2[3, 1]-SV_2[4, 1])
# M[ieq, d[iinter+1][2]] = (SV_2[3, 2]-SV_2[4, 2])
# M[ieq, d[iinter+1][3]] = (SV_2[3, | |
for ind in range(num_outputs):
t1, t5 = accuracy(outputs[ind].detach().cpu(), cls_targets[ind].detach().cpu(), topk=(1, 5))
top1_meters[ind].update(t1.item(), batch_size)
top5_meters[ind].update(t5.item(), batch_size)
loss_meters[ind].update(losses_per_task[ind].item(), batch_size)
if use_gaze:
loss_gaze.update(gaze_coord_loss.item(), batch_size)
if use_hands:
loss_hands.update(hand_coord_loss.item(), batch_size)
batch_time.update(time.time() - t0)
t0 = time.time()
to_print = '[Epoch:{}, Batch {}/{} in {:.3f} s]'.format(cur_epoch, batch_idx, len(train_iterator),
batch_time.val)
to_print += '[Losses {:.4f}[avg:{:.4f}], '.format(losses.val, losses.avg)
if use_gaze:
to_print += '[l_gcoo {:.4f}[avg:{:.4f}], '.format(loss_gaze.val, loss_gaze.avg)
if use_hands:
to_print += '[l_hcoo {:.4f}[avg:{:.4f}], '.format(loss_hands.val, loss_hands.avg)
for ind in range(num_outputs):
to_print += 'T{}::loss {:.4f}[avg:{:.4f}], '.format(ind, loss_meters[ind].val, loss_meters[ind].avg)
for ind in range(num_outputs):
to_print += 'T{}::Top1 {:.3f}[avg:{:.3f}],Top5 {:.3f}[avg:{:.3f}],'.format(
ind, top1_meters[ind].val, top1_meters[ind].avg, top5_meters[ind].val, top5_meters[ind].avg)
to_print += 'LR {:.6f}'.format(lr_scheduler.get_lr()[0])
print_and_save(to_print, log_file)
print_and_save("Epoch train time: {}".format(batch_time.sum), log_file)
def test_mfnet_mo(model, criterion, test_iterator, num_outputs, use_gaze, use_hands, cur_epoch, dataset, log_file, gpus):
loss_meters = [AverageMeter() for _ in range(num_outputs)]
losses = AverageMeter()
top1_meters = [AverageMeter() for _ in range(num_outputs)]
top5_meters = [AverageMeter() for _ in range(num_outputs)]
with torch.no_grad():
model.eval()
print_and_save('Evaluating after epoch: {} on {} set'.format(cur_epoch, dataset), log_file)
for batch_idx, (inputs, targets) in enumerate(test_iterator):
inputs = inputs.cuda(gpus[0])
outputs, coords, heatmaps = model(inputs)
targets = targets.cuda(gpus[0]).transpose(0, 1)
if use_gaze or use_hands:
cls_targets = targets[:num_outputs, :].long()
else:
cls_targets = targets
assert len(cls_targets) == num_outputs
losses_per_task = []
for output, target in zip(outputs, cls_targets):
loss_for_task = criterion(output, target)
losses_per_task.append(loss_for_task)
loss = sum(losses_per_task)
if use_gaze: # need some debugging for the gaze targets
gaze_targets = targets[num_outputs:num_outputs + 16, :].transpose(1, 0).reshape(-1, 8, 1, 2)
# for a single shared layer representation of the two signals
# for gaze slice the first element
gaze_coords = coords[:, :, 0, :]
gaze_coords.unsqueeze_(2) # unsqueeze to add the extra dimension for consistency
gaze_heatmaps = heatmaps[:, :, 0, :]
gaze_heatmaps.unsqueeze_(2)
gaze_coord_loss = calc_coord_loss(gaze_coords, gaze_heatmaps, gaze_targets)
loss = loss + gaze_coord_loss
if use_hands:
hand_targets = targets[-32:, :].transpose(1,0).reshape(-1, 8, 2, 2)
# for hands slice the last two elements, first is left, second is right hand
hand_coords = coords[:, :, -2:, :]
hand_heatmaps = heatmaps[:, :, -2:, :]
hand_coord_loss = calc_coord_loss(hand_coords, hand_heatmaps, hand_targets)
loss = loss + hand_coord_loss
# update metrics
batch_size = outputs[0].size(0)
losses.update(loss.item(), batch_size)
for ind in range(num_outputs):
t1, t5 = accuracy(outputs[ind].detach().cpu(), cls_targets[ind].detach().cpu(), topk=(1, 5))
top1_meters[ind].update(t1.item(), batch_size)
top5_meters[ind].update(t5.item(), batch_size)
loss_meters[ind].update(losses_per_task[ind].item(), batch_size)
to_print = '[Epoch:{}, Batch {}/{}] '.format(cur_epoch, batch_idx, len(test_iterator))
for ind in range(num_outputs):
to_print += 'T{}::Top1 {:.3f}[avg:{:.3f}], Top5 {:.3f}[avg:{:.3f}],'.format(
ind, top1_meters[ind].val, top1_meters[ind].avg, top5_meters[ind].val, top5_meters[ind].avg)
print_and_save(to_print, log_file)
final_print = '{} Results: Loss {:.3f},'.format(dataset, losses.avg)
for ind in range(num_outputs):
final_print += 'T{}::Top1 {:.3f}, Top5 {:.3f},'.format(ind, top1_meters[ind].avg, top5_meters[ind].avg)
print_and_save(final_print, log_file)
return [tasktop1.avg for tasktop1 in top1_meters]
import math
def unnorm_gaze_coords(_coords): # expecting values in [-1, 1]
return ((_coords + 1) * 224 - 1) / 2
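    # e.g. a normalised coordinate of -1 maps to -0.5 px, 0 to 111.5 px and 1 to 223.5 px in the 224x224 frame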
def calc_aae(pred, gt, avg='no'):
# input should be [2] with modalities=1
d = 112/math.tan(math.pi/6)
pred = pred - 112
gt = gt - 112
r1 = np.array([pred[0], pred[1], d]) # x, y are inverted in numpy but it doesn't change results
r2 = np.array([gt[0], gt[1], d])
# angles needs to be of dimension batch*temporal*modalities*1
angles = math.atan2(np.linalg.norm(np.cross(r1, r2)), np.dot(r1, r2))
# angles_deg = math.degrees(angles)
angles_deg = np.rad2deg(angles)
return angles_deg
# aae = None
# if avg == 'no': # use each coordinate pair for error calculation
# aae = [deg for deg in angles_deg.flatten()]
# elif avg == 'temporal': # average the angles for one video segment
# angles_deg = np.mean(angles_deg, 1)
# aae = [deg for deg in angles_deg.flatten()]
#
# return aae
from scipy import ndimage
def calc_auc(pred, gt):
z = np.zeros((224, 224))
z[int(pred[0])][int(pred[1])] = 1
z = ndimage.filters.gaussian_filter(z, 14)
z = z - np.min(z)
z = z / np.max(z)
atgt = z[int(gt[0])][int(gt[1])] # z[i][j]
fpbool = z > atgt
auc = (1 - float(fpbool.sum()) / (224 * 224))
return auc
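# Illustrative sanity check for the two gaze metrics above (not part of the
# original evaluation code); the coordinates are arbitrary pixel positions in
# the 224x224 frame used throughout this module.
if __name__ == "__main__":
    _pred = np.array([120.0, 100.0])  # predicted gaze point (pixels)
    _gt = np.array([112.0, 112.0])    # ground-truth gaze point (pixels)
    print("AAE (deg): {:.2f}".format(calc_aae(_pred, _gt)))
    print("AUC: {:.3f}".format(calc_auc(_pred, _gt)))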
def inner_batch_calc(_model, _inputs, _gaze_targets, _or_targets, _frame_counter, _actual_frame_counter, _aae_frame, _auc_frame,
_aae_temporal, _auc_temporal, _to_print, _log_file, _mf_remaining=8):
_outputs, _coords, _heatmaps = _model(_inputs)
_gaze_coords = _coords[:, :, 0, :]
_gaze_coords = unnorm_gaze_coords(_gaze_coords).cpu().numpy()
_batch_size, _temporal_size, _ = _gaze_targets.shape
for _b in range(_batch_size): # this will always be one, otherwise torch.stack complains for variable temporal dim.
_aae_temp = []
_auc_temp = []
for _t in range(_temporal_size-_mf_remaining, _temporal_size):
# after transforms target gaze might be off the image. this is not evaluated
_actual_frame_counter += 1
if _gaze_targets[_b, _t][0] < 0 or _gaze_targets[_b, _t][0] >= 224 or _gaze_targets[_b, _t][1] < 0 or \
_gaze_targets[_b, _t][1] >= 224: # gt out of evaluated area after cropping
continue
if _or_targets[_b, _t][0] == 0 and _or_targets[_b, _t][1] == 0: # bad ground truth
continue
_frame_counter += 1
_angle_deg = calc_aae(_gaze_coords[_b, _t], _gaze_targets[_b, _t])
_aae_temp.append(_angle_deg)
_aae_frame.update(_angle_deg) # per frame
_auc_once = calc_auc(_gaze_coords[_b, _t], _gaze_targets[_b, _t])
_auc_temp.append(_auc_once)
_auc_frame.update(_auc_once)
if len(_aae_temp) > 0:
_aae_temporal.update(np.mean(_aae_temp)) # per video segment
if len(_auc_temp) > 0:
_auc_temporal.update(np.mean(_auc_temp))
_to_print += '[Gaze::aae_frame {:.3f}[avg:{:.3f}], aae_temporal {:.3f}[avg:{:.3f}],'.format(_aae_frame.val,
_aae_frame.avg,
_aae_temporal.val,
_aae_temporal.avg)
_to_print += '::auc_frame {:.3f}[avg:{:.3f}], auc_temporal {:.3f}[avg:{:.3f}]]'.format(_auc_frame.val,
_auc_frame.avg,
_auc_temporal.val,
_auc_temporal.avg)
print_and_save(_to_print, _log_file)
return _auc_frame, _auc_temporal, _aae_frame, _aae_temporal, _frame_counter, _actual_frame_counter
def validate_mfnet_mo_gaze(model, criterion, test_iterator, num_outputs, use_gaze, use_hands, cur_epoch, dataset, log_file):
auc_frame, auc_temporal = AverageMeter(), AverageMeter()
aae_frame, aae_temporal = AverageMeter(), AverageMeter()
print_and_save('Evaluating after epoch: {} on {} set'.format(cur_epoch, dataset), log_file)
with torch.no_grad():
model.eval()
frame_counter = 0
actual_frame_counter = 0
video_counter = 0
for batch_idx, (inputs, targets, orig_gaze, video_names) in enumerate(test_iterator):
video_counter += 1
to_print = '[Batch {}/{}]'.format(batch_idx, len(test_iterator))
inputs = inputs.cuda()
targets = targets.cuda().transpose(0, 1)
orig_gaze = orig_gaze.cuda().transpose(0, 1)
double_temporal_size = inputs.shape[2]
temporal_size = double_temporal_size // 2
if use_gaze or use_hands:
cls_targets = targets[:num_outputs, :].long()
else:
cls_targets = targets
assert len(cls_targets) == num_outputs
gaze_targets = targets[num_outputs:num_outputs + 2*temporal_size, :].transpose(1, 0).reshape(-1, temporal_size, 1, 2)
gaze_targets.squeeze_(2)
gaze_targets = unnorm_gaze_coords(gaze_targets).cpu().numpy()
orig_targets = orig_gaze[:2*temporal_size, :].transpose(1, 0).reshape(-1, temporal_size, 1, 2)
orig_targets.squeeze_(2)
# batch over the blocks of 16 frames for mfnet
mf_blocks = double_temporal_size//16
mf_remaining = double_temporal_size%16
for mf_i in range(mf_blocks):
mf_inputs = inputs[:,:,mf_i*16:(mf_i+1)*16,:,:]
mf_targets = gaze_targets[:, mf_i*8:(mf_i+1)*8]
or_targets = orig_targets[:, mf_i*8:(mf_i+1)*8]
auc_frame, auc_temporal, aae_frame, aae_temporal, frame_counter, actual_frame_counter = inner_batch_calc(
model, mf_inputs, mf_targets, or_targets, frame_counter, actual_frame_counter, aae_frame, auc_frame,
aae_temporal, auc_temporal, to_print, log_file
)
if mf_remaining > 0:
mf_inputs = inputs[:,:,double_temporal_size-16:,:,:]
mf_targets = gaze_targets[:, temporal_size-8:]
or_targets = orig_targets[:, temporal_size-8:]
auc_frame, auc_temporal, aae_frame, aae_temporal, frame_counter, actual_frame_counter = inner_batch_calc(
model, mf_inputs, mf_targets, or_targets, frame_counter, actual_frame_counter, aae_frame, auc_frame,
aae_temporal, auc_temporal, to_print, log_file, mf_remaining//2
)
to_print = 'Evaluated in total {}/{} frames in {} video segments.'.format(frame_counter, actual_frame_counter,
video_counter)
print_and_save(to_print, log_file)
def validate_mfnet_mo(model, criterion, test_iterator, num_outputs, use_gaze, use_hands, cur_epoch, dataset, log_file):
loss_meters = [AverageMeter() for _ in range(num_outputs)]
losses = AverageMeter()
top1_meters = [AverageMeter() for _ in range(num_outputs)]
top5_meters = [AverageMeter() for _ in range(num_outputs)]
task_outputs = [[] for _ in range(num_outputs)]
print_and_save('Evaluating after epoch: {} on {} set'.format(cur_epoch, dataset), log_file)
with torch.no_grad():
model.eval()
for batch_idx, (inputs, targets, video_names) in enumerate(test_iterator):
inputs = inputs.cuda()
outputs, coords, heatmaps = model(inputs)
targets = targets.cuda().transpose(0, 1)
if use_gaze or use_hands:
cls_targets = targets[:num_outputs, :].long()
else:
cls_targets = targets
assert len(cls_targets) == num_outputs
losses_per_task = []
for output, target in zip(outputs, cls_targets):
loss_for_task = criterion(output, target)
losses_per_task.append(loss_for_task)
loss = sum(losses_per_task)
gaze_coord_loss, hand_coord_loss = 0, 0
if use_gaze:
gaze_targets = targets[num_outputs:num_outputs + 16, :].transpose(1, 0).reshape(-1, 8, 1, 2)
# for a single shared layer representation of the two signals
# for gaze slice the first element
gaze_coords = coords[:, :, 0, :]
gaze_coords.unsqueeze_(2) # unsqueeze to add the extra dimension for consistency
gaze_heatmaps = heatmaps[:, :, 0, :]
gaze_heatmaps.unsqueeze_(2)
gaze_coord_loss = calc_coord_loss(gaze_coords, gaze_heatmaps, gaze_targets)
loss = loss + gaze_coord_loss
if use_hands:
                hand_targets = targets[-32:, :].transpose(1, 0).reshape(-1, 8, 2, 2)
# for hands slice the last two elements, first is left, second is right hand
hand_coords = coords[:, :, -2:, :]
hand_heatmaps = heatmaps[:, :, -2:, :]
hand_coord_loss = calc_coord_loss(hand_coords, hand_heatmaps, hand_targets)
loss = loss + hand_coord_loss
batch_size = outputs[0].size(0)
batch_preds = []
for j in range(batch_size):
txt_batch_preds = "{}".format(video_names[j])
for ind in range(num_outputs):
txt_batch_preds += ", "
res = np.argmax(outputs[ind][j].detach().cpu().numpy())
label = cls_targets[ind][j].detach().cpu().numpy()
task_outputs[ind].append([res, label])
txt_batch_preds += "T{} P-L:{}-{}".format(ind, res, label)
batch_preds.append(txt_batch_preds)
losses.update(loss.item(), batch_size)
for ind in range(num_outputs):
t1, t5 = accuracy(outputs[ind].detach().cpu(), cls_targets[ind].detach().cpu(), topk=(1, 5))
top1_meters[ind].update(t1.item(), batch_size)
top5_meters[ind].update(t5.item(), batch_size)
loss_meters[ind].update(losses_per_task[ind].item(), batch_size)
to_print = '[Batch {}/{}]'.format(batch_idx, len(test_iterator))
for ind in range(num_outputs):
to_print += '[T{}::Top1 {:.3f}[avg:{:.3f}], Top5 {:.3f}[avg:{:.3f}]],'.format(ind,
top1_meters[ind].val, top1_meters[ind].avg,
top5_meters[ind].val, top5_meters[ind].avg)
to_print+= '\n\t{}'.format(batch_preds)
print_and_save(to_print, log_file)
to_print = | |
function: The callback function
for asynchronous request. (optional)
:param str id: ProductSize id (required)
:param str filter:
:return: list[Product]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'filter']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method product_sizes_id_products_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `product_sizes_id_products_get`")
collection_formats = {}
resource_path = '/ProductSizes/{id}/products'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'filter' in params:
query_params['filter'] = params['filter']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Product]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def product_sizes_id_products_post(self, id, **kwargs):
"""
Creates a new instance in products of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.product_sizes_id_products_post(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: ProductSize id (required)
:param Product data:
:return: Product
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.product_sizes_id_products_post_with_http_info(id, **kwargs)
else:
(data) = self.product_sizes_id_products_post_with_http_info(id, **kwargs)
return data
def product_sizes_id_products_post_with_http_info(self, id, **kwargs):
"""
Creates a new instance in products of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.product_sizes_id_products_post_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: ProductSize id (required)
:param Product data:
:return: Product
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method product_sizes_id_products_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `product_sizes_id_products_post`")
collection_formats = {}
resource_path = '/ProductSizes/{id}/products'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Product',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def product_sizes_id_put(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.product_sizes_id_put(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param ProductSize data: Model instance data
:return: ProductSize
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.product_sizes_id_put_with_http_info(id, **kwargs)
else:
(data) = self.product_sizes_id_put_with_http_info(id, **kwargs)
return data
def product_sizes_id_put_with_http_info(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.product_sizes_id_put_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param ProductSize data: Model instance data
:return: ProductSize
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method product_sizes_id_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `product_sizes_id_put`")
collection_formats = {}
resource_path = '/ProductSizes/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProductSize',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def product_sizes_id_replace_post(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.product_sizes_id_replace_post(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param ProductSize data: Model instance data
:return: ProductSize
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.product_sizes_id_replace_post_with_http_info(id, **kwargs)
else:
(data) = self.product_sizes_id_replace_post_with_http_info(id, **kwargs)
return data
def product_sizes_id_replace_post_with_http_info(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.product_sizes_id_replace_post_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param ProductSize data: Model instance data
:return: ProductSize
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method product_sizes_id_replace_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `product_sizes_id_replace_post`")
collection_formats = {}
resource_path = '/ProductSizes/{id}/replace'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProductSize',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def product_sizes_id_size_materials_count_get(self, id, **kwargs):
"""
Counts sizeMaterials of ProductSize.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.product_sizes_id_size_materials_count_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous | |
+ 1)] = [
estimator.score(metric, pos_label=pos_label, cutoff=cutoff)
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(metric, pos_label=pos_label, cutoff=cutoff)
]
else:
result["{}-fold".format(i + 1)] = [
estimator.score(m, pos_label=pos_label, cutoff=cutoff)
for m in metric
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(m, pos_label=pos_label, cutoff=cutoff)
for m in metric
]
except:
if metric == "all":
result["{}-fold".format(i + 1)] = estimator.classification_report(
cutoff=cutoff
).values["value"][0:-1]
if training_score:
estimator.test_relation = estimator.input_relation
result_train[
"{}-fold".format(i + 1)
] = estimator.classification_report(cutoff=cutoff).values[
"value"
][
0:-1
]
elif isinstance(metric, str):
result["{}-fold".format(i + 1)] = [
estimator.score(metric, cutoff=cutoff)
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(metric, cutoff=cutoff)
]
else:
result["{}-fold".format(i + 1)] = [
estimator.score(m, cutoff=cutoff) for m in metric
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(m, cutoff=cutoff) for m in metric
]
try:
estimator.drop()
except:
pass
n = len(final_metrics)
total = [[] for item in range(n)]
for i in range(cv):
for k in range(n):
total[k] += [result["{}-fold".format(i + 1)][k]]
if training_score:
total_train = [[] for item in range(n)]
for i in range(cv):
for k in range(n):
total_train[k] += [result_train["{}-fold".format(i + 1)][k]]
result["avg"], result["std"] = [], []
if training_score:
result_train["avg"], result_train["std"] = [], []
for item in total:
result["avg"] += [statistics.mean([float(elem) for elem in item])]
result["std"] += [statistics.stdev([float(elem) for elem in item])]
if training_score:
for item in total_train:
result_train["avg"] += [statistics.mean([float(elem) for elem in item])]
result_train["std"] += [statistics.stdev([float(elem) for elem in item])]
total_time += [
statistics.mean([float(elem) for elem in total_time]),
statistics.stdev([float(elem) for elem in total_time]),
]
result = tablesample(values=result).transpose()
if show_time:
result.values["time"] = total_time
if training_score:
result_train = tablesample(values=result_train).transpose()
if show_time:
result_train.values["time"] = total_time
if training_score:
return result, result_train
else:
return result
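# Example (illustrative; the estimator, relation and column names are
# assumptions; the positional argument order mirrors the call made from
# grid_search_cv below):
#
#   report = cross_validate(model, "public.titanic", ["age", "fare"],
#                           "survived", "auc", 3)
#   # one row per fold plus 'avg' and 'std'; a second tablesample of
#   # training scores is returned when training_score is set to True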
# ---#
def elbow(
input_relation: (str, vDataFrame),
X: list = [],
cursor=None,
n_cluster: (tuple, list) = (1, 15),
init: (str, list) = "kmeanspp",
max_iter: int = 50,
tol: float = 1e-4,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws an Elbow Curve.
Parameters
----------
input_relation: str/vDataFrame
Relation to use to train the model.
X: list, optional
List of the predictor columns. If empty all the numerical vcolumns will
be used.
cursor: DBcursor, optional
Vertica DB cursor.
n_cluster: tuple/list, optional
Tuple representing the number of cluster to start with and to end with.
It can also be customized list with the different K to test.
init: str/list, optional
The method to use to find the initial cluster centers.
kmeanspp : Use the KMeans++ method to initialize the centers.
random : The initial centers
It can be also a list with the initial cluster centers to use.
max_iter: int, optional
The maximum number of iterations the algorithm performs.
tol: float, optional
Determines whether the algorithm has converged. The algorithm is considered
converged after no center has moved more than a distance of 'tol' from the
previous iteration.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("X", X, [list],),
("input_relation", input_relation, [str, vDataFrame],),
("n_cluster", n_cluster, [list],),
("init", init, ["kmeanspp", "random"],),
("max_iter", max_iter, [int, float],),
("tol", tol, [int, float],),
]
)
cursor, conn = check_cursor(cursor, input_relation)[0:2]
version(cursor=cursor, condition=[8, 0, 0])
    schema, relation = schema_relation(input_relation)
    all_within_cluster_SS = []
    if isinstance(n_cluster, tuple):
        L = [i for i in range(n_cluster[0], n_cluster[1])]
    else:
        L = list(n_cluster)
        L.sort()
for i in L:
cursor.execute(
"DROP MODEL IF EXISTS {}.VERTICAPY_KMEANS_TMP_{}".format(
schema, get_session(cursor)
)
)
from verticapy.learn.cluster import KMeans
model = KMeans(
"{}.VERTICAPY_KMEANS_TMP_{}".format(schema, get_session(cursor)),
cursor,
i,
init,
max_iter,
tol,
)
model.fit(input_relation, X)
all_within_cluster_SS += [float(model.metrics_.values["value"][3])]
model.drop()
if conn:
conn.close()
if not (ax):
fig, ax = plt.subplots()
if isnotebook():
fig.set_size_inches(8, 6)
ax.grid(axis="y")
param = {
"color": gen_colors()[0],
"marker": "o",
"markerfacecolor": "white",
"markersize": 7,
"markeredgecolor": "black",
}
ax.plot(
L, all_within_cluster_SS, **updated_dict(param, style_kwds),
)
ax.set_title("Elbow Curve")
ax.set_xlabel("Number of Clusters")
ax.set_ylabel("Between-Cluster SS / Total SS")
values = {"index": L, "Within-Cluster SS": all_within_cluster_SS}
return tablesample(values=values)
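# Example (illustrative; the relation name and column list are assumptions):
#
#   res = elbow("public.iris", X=["PetalLengthCm", "PetalWidthCm"],
#               n_cluster=(1, 10))
#   # draws the elbow curve and returns a tablesample with one
#   # Within-Cluster SS value per tested number of clusters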
# ---#
def grid_search_cv(
estimator,
param_grid: dict,
input_relation: (str, vDataFrame),
X: list,
y: str,
metric: str = "auto",
cv: int = 3,
pos_label: (int, float, str) = None,
cutoff: float = -1,
training_score: bool = True,
skip_error: bool = False,
):
"""
---------------------------------------------------------------------------
Computes the K-Fold grid search of an estimator.
Parameters
----------
estimator: object
Vertica estimator having a fit method and a DB cursor.
param_grid: dict
Dictionary of the parameters to test.
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
metric: str, optional
Metric used to do the model evaluation.
auto: logloss for classification & rmse for regression.
For Classification:
accuracy : Accuracy
auc : Area Under the Curve (ROC)
bm : Informedness = tpr + tnr - 1
csi : Critical Success Index = tp / (tp + fn + fp)
f1 : F1 Score
logloss : Log Loss
mcc : Matthews Correlation Coefficient
mk : Markedness = ppv + npv - 1
npv : Negative Predictive Value = tn / (tn + fn)
prc_auc : Area Under the Curve (PRC)
precision : Precision = tp / (tp + fp)
recall : Recall = tp / (tp + fn)
specificity : Specificity = tn / (tn + fp)
For Regression:
max : Max Error
mae : Mean Absolute Error
median : Median Absolute Error
mse : Mean Squared Error
msle : Mean Squared Log Error
r2 : R squared coefficient
r2a : R2 adjusted
rmse : Root Mean Squared Error
var : Explained Variance
cv: int, optional
Number of folds.
pos_label: int/float/str, optional
The main class to be considered as positive (classification only).
cutoff: float, optional
The model cutoff (classification only).
training_score: bool, optional
If set to True, the training score will be computed with the validation score.
skip_error: bool, optional
If set to True and an error occurs, it will be displayed and not raised.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("metric", metric, [str]),
("param_grid", param_grid, [dict]),
("training_score", training_score, [bool]),
("skip_error", skip_error, [bool]),
]
)
if (
estimator.type
in (
"RandomForestRegressor",
"LinearSVR",
"LinearRegression",
"KNeighborsRegressor",
)
and metric == "auto"
):
metric = "rmse"
elif metric == "auto":
metric = "logloss"
for param in param_grid:
assert isinstance(param_grid[param], Iterable) and not (
isinstance(param_grid[param], str)
), ParameterError(
f"The parameter 'param_grid' must be a dictionary where each value is a list of parameters, found {type(param_grid[param])} for parameter '{param}'."
)
all_configuration = [
dict(zip(param_grid.keys(), values)) for values in product(*param_grid.values())
]
    # validate every configuration up front so that invalid parameters fail fast
    for config in all_configuration:
        estimator.set_params(config)
    # evaluate each configuration with K-fold cross-validation
data = []
for config in all_configuration:
try:
estimator.set_params(config)
current_cv = cross_validate(
estimator,
input_relation,
X,
y,
metric,
cv,
pos_label,
cutoff,
True,
training_score,
)
if training_score:
keys = [elem for elem in current_cv[0].values]
data += [
(
config,
current_cv[0][keys[1]][cv],
current_cv[1][keys[1]][cv],
current_cv[0][keys[2]][cv],
current_cv[0][keys[1]][cv + 1],
current_cv[1][keys[1]][cv + 1],
)
]
else:
keys = [elem for elem in current_cv.values]
data += [
(
config,
current_cv[keys[1]][cv],
current_cv[keys[2]][cv],
current_cv[keys[1]][cv + 1],
)
]
except Exception as e:
if skip_error:
print(e)
else:
raise (e)
reverse = True
if metric in ["logloss", "max", "mae", "median", "mse", "msle", "rmse"]:
reverse = False
data.sort(key=lambda tup: tup[1], reverse=reverse)
if training_score:
result = tablesample(
{
"parameters": [elem[0] for elem in data],
"avg_score": [elem[1] for elem in data],
"avg_train_score": [elem[2] for elem in data],
"avg_time": [elem[3] for elem in data],
"score_std": [elem[4] for elem in data],
"score_train_std": [elem[5] for elem in data],
}
)
else:
result = tablesample(
{
"parameters": [elem[0] for elem in data],
"avg_score": [elem[1] for elem in data],
"avg_time": [elem[2] for elem in data],
"score_std": [elem[3] for elem in data],
}
)
return result
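# Example (illustrative; the estimator, relation and column names are
# assumptions):
#
#   from verticapy.learn.ensemble import RandomForestClassifier
#   model = RandomForestClassifier("rf_titanic", cursor)
#   grid = grid_search_cv(model,
#                         {"max_depth": [4, 6], "n_estimators": [20, 50]},
#                         "public.titanic", ["age", "fare"], "survived",
#                         metric="auc", cv=3)
#   # 'grid' is a tablesample of configurations sorted from best to worst
#   # average validation score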
# ---#
def lift_chart(
y_true: str,
y_score: str,
input_relation: (str, vDataFrame),
cursor=None,
pos_label: (int, float, str) = 1,
nbins: int = 30,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the Lift Chart.
Parameters
----------
y_true: str
Response column.
y_score: str
Prediction Probability.
input_relation: str/vDataFrame
Relation to use to do the scoring. The relation can be a view or a table
or even a customized relation. | |
> 10"
]
alarm_def = {
u'name': '<NAME>',
u'expression': 'test.metric > 10'
}
for expression in bad_expressions:
alarm_def[u'expression'] = expression
response = self.simulate_request(
path="/v2.0/alarm-definitions/",
headers={'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="POST",
body=json.dumps(alarm_def))
self.assertEqual(response.status, '422 Unprocessable Entity',
u'Expression {} should have failed'.format(expression))
def test_alarm_definition_create_with_occupied_alarm_definition_name(self):
self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = [{
'alarm_actions': None,
'ok_actions': None,
'description': None,
'match_by': u'hostname',
'name': u'<NAME>',
'actions_enabled': 1,
'undetermined_actions': None,
'expression': u'max(test.metric{hostname=host}) gte 1',
'id': u'00000001-0001-0001-0001-000000000001',
'severity': u'LOW'
}]
alarm_def = {
u'name': u'<NAME>',
u'expression': u'max(test.metric{hostname=host}) gte 1'
}
response = self.simulate_request(
path="/v2.0/alarm-definitions/",
headers={'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="POST",
body=json.dumps(alarm_def))
self.assertEqual(response.status, falcon.HTTP_409)
def test_alarm_definition_update(self):
self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = []
self.alarm_def_repo_mock.return_value.update_or_patch_alarm_definition.return_value = (
{u'alarm_actions': [],
u'ok_actions': [],
u'description': u'Non-ASCII character: \u2603',
u'match_by': u'hostname',
u'name': u'<NAME>',
u'actions_enabled': True,
u'undetermined_actions': [],
u'is_deterministic': False,
u'expression': u'max(test.metric{hostname=host}) gte 1',
u'id': u'00000001-0001-0001-0001-000000000001',
u'severity': u'LOW'},
{'old': {'11111': sub_alarm_definition.SubAlarmDefinition(
row={'id': '11111',
'alarm_definition_id': u'00000001-0001-0001-0001-000000000001',
'function': 'max',
'metric_name': 'test.metric',
'dimensions': 'hostname=host',
'operator': 'gte',
'threshold': 1,
'period': 60,
'is_deterministic': False,
'periods': 1})},
'changed': {},
'new': {},
'unchanged': {'11111': sub_alarm_definition.SubAlarmDefinition(
row={'id': '11111',
'alarm_definition_id': u'00000001-0001-0001-0001-000000000001',
'function': 'max',
'metric_name': 'test.metric',
'dimensions': 'hostname=host',
'operator': 'gte',
'threshold': 1,
'period': 60,
'is_deterministic': False,
'periods': 1})}})
expected_def = {
u'id': u'00000001-0001-0001-0001-000000000001',
u'alarm_actions': [],
u'ok_actions': [],
u'description': u'Non-ASCII character: \u2603',
u'links': [{u'href': u'http://falconframework.org/v2.0/alarm-definitions/'
u'00000001-0001-0001-0001-000000000001',
u'rel': u'self'}],
u'match_by': [u'hostname'],
u'name': u'Test Alarm',
u'actions_enabled': True,
u'undetermined_actions': [],
u'deterministic': False,
u'expression': u'max(test.metric{hostname=host}) gte 1',
u'severity': u'LOW',
}
alarm_def = {
u'alarm_actions': [],
u'ok_actions': [],
u'description': u'',
u'match_by': [u'hostname'],
u'name': u'<NAME>',
u'actions_enabled': True,
u'undetermined_actions': [],
u'deterministic': False,
u'expression': u'max(test.metric{hostname=host}) gte 1',
u'severity': u'LOW',
}
result = self.simulate_request(path="/v2.0/alarm-definitions/%s" % expected_def[u'id'],
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="PUT",
body=json.dumps(alarm_def))
self.assertEqual(result.status, falcon.HTTP_200)
result_def = result.json
self.assertEqual(result_def, expected_def)
def test_alarm_definition_patch_incorrect_id(self):
self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = [{
'alarm_actions': None,
'ok_actions': None,
'description': None,
'match_by': u'hostname',
'name': u'<NAME>',
'actions_enabled': 1,
'undetermined_actions': None,
'expression': u'max(test.metric{hostname=host}) gte 1',
'id': u'00000001-0001-0001-0001-000000000001',
'severity': u'LOW'
}]
alarm_def = {
u'name': u'Test Alarm Definition Updated',
}
response = self.simulate_request(
path="/v2.0/alarm-definitions/9999999-0001-0001-0001-000000000001",
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="PATCH",
body=json.dumps(alarm_def))
self.assertEqual(response.status, falcon.HTTP_409)
def test_alarm_definition_put_incorrect_period_value(self):
self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = []
period = 'times 0'
alarm_def = {
u'alarm_actions': [],
u'ok_actions': [],
u'description': u'',
u'match_by': [u'hostname'],
u'name': u'Test Alarm',
u'actions_enabled': True,
u'undetermined_actions': [],
u'deterministic': False,
u'expression': u'max(test.metric{hostname=host}) gte 1 ' + period,
u'severity': u'LOW',
}
response = self.simulate_request(
path="/v2.0/alarm-definitions/00000001-0001-0001-0001-000000000001",
headers={'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="PUT",
body=json.dumps(alarm_def))
self.assertEqual(response.status, falcon.HTTP_422)
def test_alarm_definition_patch_no_id(self):
alarm_def = {
u'name': u'Test Alarm Definition Updated',
}
response = self.simulate_request(
path="/v2.0/alarm-definitions/",
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="PATCH",
body=json.dumps(alarm_def))
self.assertEqual(response.status, falcon.HTTP_400)
def test_alarm_definition_update_no_id(self):
alarm_def = {
u'name': u'Test Alarm Definition Updated',
}
response = self.simulate_request(
path="/v2.0/alarm-definitions/",
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="PUT",
body=json.dumps(alarm_def))
self.assertEqual(response.status, falcon.HTTP_400)
def test_alarm_definition_delete(self):
self.alarm_def_repo_mock.return_value.get_get_sub_alarm_definitions.return_value = [{
'alarm_definition_id': '123',
'dimensions': 'flavor_id=777',
'function': 'AVG',
'id': '111',
'metric_name': 'cpu.idle_perc',
'operator': 'GT',
'period': 60,
'periods': 1,
'is_deterministic': False,
'threshold': 10.0}]
self.alarm_def_repo_mock.return_value.get_alarm_metrics.return_value = [{
'alarm_id': '1',
'dimensions': 'flavor_id=777',
'name': 'cpu.idle_perc'}]
self.alarm_def_repo_mock.return_value.get_sub_alarms.return_value = [{
'alarm_definition_id': '1',
'alarm_id': '2',
'expression': 'avg(cpu.idle_perc{flavor_id=777}) > 10',
'sub_alarm_id': '43'}]
self.alarm_def_repo_mock.return_value.delete_alarm_definition.return_value = True
response = self.simulate_request(
path='/v2.0/alarm-definitions/00000001-0001-0001-0001-000000000001',
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='DELETE')
self.assertEqual(response.status, falcon.HTTP_204)
def test_alarm_definition_delete_alarm_definition_not_exist(self):
self.alarm_def_repo_mock.return_value.get_get_sub_alarm_definitions.return_value = []
self.alarm_def_repo_mock.return_value.get_alarm_metrics.return_value = []
self.alarm_def_repo_mock.return_value.get_sub_alarms.return_value = []
self.alarm_def_repo_mock.return_value.delete_alarm_definition.return_value = False
response = self.simulate_request(
path='/v2.0/alarm-definitions/00000001-0001-0001-0001-000000000001',
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='DELETE')
self.assertEqual(response.status, falcon.HTTP_404)
def test_alarm_definition_delete_no_id(self):
response = self.simulate_request(
path="/v2.0/alarm-definitions/",
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="DELETE")
self.assertEqual(response.status, falcon.HTTP_400)
def test_alarm_definition_patch(self):
self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = []
description = u'Non-ASCII character: \u2603'
new_name = u'Test Alarm Updated'
actions_enabled = True
alarm_def_id = u'00000001-0001-0001-0001-000000000001'
alarm_expression = u'max(test.metric{hostname=host}) gte 1'
severity = u'LOW'
match_by = u'hostname'
self.alarm_def_repo_mock.return_value.update_or_patch_alarm_definition.return_value = (
{u'alarm_actions': [],
u'ok_actions': [],
u'description': description,
u'match_by': match_by,
u'name': new_name,
u'actions_enabled': actions_enabled,
u'undetermined_actions': [],
u'is_deterministic': False,
u'expression': alarm_expression,
u'id': alarm_def_id,
u'severity': severity},
{'old': {'11111': sub_alarm_definition.SubAlarmDefinition(
row={'id': '11111',
'alarm_definition_id': u'00000001-0001-0001-0001-000000000001',
'function': 'max',
'metric_name': 'test.metric',
'dimensions': 'hostname=host',
'operator': 'gte',
'threshold': 1,
'period': 60,
'is_deterministic': False,
'periods': 1})},
'changed': {},
'new': {},
'unchanged': {'11111': sub_alarm_definition.SubAlarmDefinition(
row={'id': '11111',
'alarm_definition_id': u'00000001-0001-0001-0001-000000000001',
'function': 'max',
'metric_name': 'test.metric',
'dimensions': 'hostname=host',
'operator': 'gte',
'threshold': 1,
'period': 60,
'is_deterministic': False,
'periods': 1})}})
expected_def = {
u'id': alarm_def_id,
u'alarm_actions': [],
u'ok_actions': [],
u'description': description,
u'links': [{u'href': u'http://falconframework.org/v2.0/alarm-definitions/'
u'00000001-0001-0001-0001-000000000001',
u'rel': u'self'}],
u'match_by': [match_by],
u'name': new_name,
u'actions_enabled': actions_enabled,
u'undetermined_actions': [],
u'deterministic': False,
u'expression': alarm_expression,
u'severity': severity,
}
alarm_def = {
u'name': u'Test Alarm Updated',
}
result = self.simulate_request(path="/v2.0/alarm-definitions/%s" % expected_def[u'id'],
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="PATCH",
body=json.dumps(alarm_def))
self.assertEqual(result.status, falcon.HTTP_200)
result_def = result.json
self.assertEqual(result_def, expected_def)
# If the alarm-definition-updated event does not have all of the
# fields set, the Threshold Engine will get confused. For example,
# if alarmActionsEnabled is none, thresh will read that as false
# and pass that value onto the Notification Engine which will not
        # create a notification even though actions_enabled is True in the
# database. So, ensure all fields are set correctly
((_, event), _) = self._send_event.call_args
expr = u'max(test.metric{hostname=host}, 60) gte 1 times 1'
sub_expression = {'11111': {u'expression': expr,
u'function': 'max',
u'metricDefinition': {
u'dimensions': {'hostname': 'host'},
u'name': 'test.metric'},
u'operator': 'gte',
u'period': 60,
u'periods': 1,
u'threshold': 1}}
fields = {u'alarmActionsEnabled': actions_enabled,
u'alarmDefinitionId': alarm_def_id,
u'alarmDescription': description,
u'alarmExpression': alarm_expression,
u'alarmName': new_name,
u'changedSubExpressions': {},
u'matchBy': [match_by],
u'severity': severity,
u'tenantId': u'fedcba9876543210fedcba9876543210',
u'newAlarmSubExpressions': {},
u'oldAlarmSubExpressions': sub_expression,
u'unchangedSubExpressions': sub_expression}
reference = {u'alarm-definition-updated': fields}
self.assertEqual(reference, event)
def test_alarm_definition_update_missing_fields(self):
self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = []
self.alarm_def_repo_mock.return_value.update_or_patch_alarm_definition.return_value = (
{u'alarm_actions': [],
u'ok_actions': [],
u'description': u'Non-ASCII character: \u2603',
u'match_by': u'hostname',
u'name': u'Test Alarm',
u'actions_enabled': True,
u'undetermined_actions': [],
u'expression': u'max(test.metric{hostname=host}) gte 1',
u'id': u'00000001-0001-0001-0001-000000000001',
u'is_deterministic': False,
u'severity': u'LOW'},
{'old': {'11111': sub_alarm_definition.SubAlarmDefinition(
row={'id': '11111',
'alarm_definition_id': u'00000001-0001-0001-0001-000000000001',
'function': 'max',
'metric_name': 'test.metric',
'dimensions': 'hostname=host',
'operator': 'gte',
'threshold': 1,
'period': 60,
'periods': 1,
'is_deterministic': False})},
'changed': {},
'new': {},
'unchanged': {'11111': sub_alarm_definition.SubAlarmDefinition(
row={'id': '11111',
'alarm_definition_id': u'00000001-0001-0001-0001-000000000001',
'function': 'max',
'metric_name': 'test.metric',
'dimensions': 'hostname=host',
'operator': 'gte',
'threshold': 1,
'period': 60,
'periods': 1,
'is_deterministic': False})}})
expected_def = {
u'id': u'00000001-0001-0001-0001-000000000001',
u'alarm_actions': [],
u'ok_actions': [],
u'description': u'Non-ASCII character: \u2603',
u'links': [{u'href': u'http://falconframework.org/v2.0/alarm-definitions/'
u'00000001-0001-0001-0001-000000000001',
u'rel': u'self'}],
u'match_by': [u'hostname'],
u'name': u'Test Alarm',
u'actions_enabled': True,
u'undetermined_actions': [],
u'expression': u'max(test.metric{hostname=host}) gte 1',
u'severity': u'LOW',
u'deterministic': False
}
alarm_def = {
u'alarm_actions': [],
u'ok_actions': [],
u'description': u'',
u'match_by': [u'hostname'],
u'name': u'Test Alarm',
u'actions_enabled': True,
u'undetermined_actions': [],
u'expression': u'max(test.metric{hostname=host}) gte 1',
u'severity': u'LOW'
}
result = self.simulate_request(path="/v2.0/alarm-definitions/%s" % expected_def[u'id'],
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="PUT",
body=json.dumps(alarm_def))
self.assertEqual(result.status, falcon.HTTP_200)
result_def = result.json
self.assertEqual(result_def, expected_def)
        # Iterate over a snapshot, since the loop deletes and re-adds keys.
        for key, value in list(alarm_def.items()):
del alarm_def[key]
response = self.simulate_request(
path="/v2.0/alarm-definitions/%s" % expected_def[u'id'],
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method="PUT",
body=json.dumps(alarm_def))
self.assertEqual(response.status, "422 Unprocessable Entity",
u"should have failed without key {}".format(key))
alarm_def[key] = value
def test_alarm_definition_get_specific_alarm(self):
self.alarm_def_repo_mock.return_value.get_alarm_definition.return_value = {
'alarm_actions': None,
'ok_actions': None,
# The description field was decoded to unicode when the
# alarm_definition was created.
'description': u'Non-ASCII character: \u2603',
'match_by': u'hostname',
'name': u'Test Alarm',
'actions_enabled': 1,
'undetermined_actions': None,
'deterministic': False,
'expression': u'max(test.metric{hostname=host}) gte 1',
'id': u'00000001-0001-0001-0001-000000000001',
'severity': u'LOW'
}
expected_data = {
u'alarm_actions': [],
u'ok_actions': [],
u'description': u'Non-ASCII character: \u2603',
u'match_by': [u'hostname'],
            u'name': u'Test Alarm',
u'actions_enabled': True,
u'undetermined_actions': [],
u'deterministic': False,
u'expression': u'max(test.metric{hostname=host}) gte 1',
u'id': u'00000001-0001-0001-0001-000000000001',
u'severity': u'LOW',
}
response = self.simulate_request(
path='/v2.0/alarm-definitions/%s' % (expected_data[u'id']),
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID,
})
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_data))
def test_alarm_definition_get_specific_alarm_description_none(self):
self.alarm_def_repo_mock.return_value.get_alarm_definition.return_value = {
'alarm_actions': None,
'ok_actions': None,
'description': None,
'match_by': u'hostname',
'name': u'<NAME>',
'actions_enabled': 1,
'undetermined_actions': None,
'expression': u'max(test.metric{hostname=host}) gte 1',
'id': u'00000001-0001-0001-0001-000000000001',
'severity': u'LOW'
}
expected_data = {
u'alarm_actions': [],
u'ok_actions': [],
u'description': None,
u'match_by': [u'hostname'],
u'name': u'<NAME>',
u'actions_enabled': True,
u'undetermined_actions': [],
u'deterministic': False,
u'expression': u'max(test.metric{hostname=host}) gte 1',
u'id': u'00000001-0001-0001-0001-000000000001',
u'severity': u'LOW',
}
response = self.simulate_request(
path='/v2.0/alarm-definitions/%s' % (expected_data[u'id']),
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID,
})
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_data))
def test_get_alarm_definitions_with_multibyte_character(self):
def_name = 'alarm_definition'
if six.PY2:
def_name = def_name.decode('utf8')
expected_data = {
u'alarm_actions': [], u'ok_actions': [],
u'description': None, u'match_by': [u'hostname'],
u'actions_enabled': True, u'undetermined_actions': [],
u'deterministic': False,
u'expression': u'max(test.metric{hostname=host}) gte 1',
u'id': u'00000001-0001-0001-0001-000000000001',
u'severity': u'LOW', u'name': def_name
}
self.alarm_def_repo_mock.return_value.get_alarm_definition.return_value = {
'alarm_actions': None,
'ok_actions': None,
'description': None,
'match_by': u'hostname',
'name': def_name,
'actions_enabled': 1,
'undetermined_actions': None,
'expression': u'max(test.metric{hostname=host}) gte 1',
'id': u'00000001-0001-0001-0001-000000000001',
'severity': u'LOW'
}
response = self.simulate_request(
path='/v2.0/alarm-definitions/%s' % (expected_data[u'id']),
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID,
}
)
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_data))
def test_alarm_definition_get_alarm_definition_list(self):
self.alarm_def_repo_mock.return_value.get_alarm_definitions.return_value = [{
'alarm_actions': None,
'ok_actions': None,
'description': None,
'match_by': u'hostname',
'name': u'<NAME>',
'actions_enabled': 1,
'undetermined_actions': None,
'expression': u'max(test.metric{hostname=host}) gte 1',
'id': u'00000001-0001-0001-0001-000000000001',
'severity': u'LOW'
}]
link = 'http://falconframework.org/v2.0/alarm-definitions/' \
'00000001-0001-0001-0001-000000000001'
expected_data = {
| |
# trezor_crypto/trezor_ctypes_gen.py
# -*- coding: utf-8 -*-
#
# TARGET arch is: ["'--std=c99", '-fPIC', '-DUSE_MONERO=1', '-DUSE_KECCAK=1', '-DUSE_LIBSODIUM', '-DSODIUM_STATIC=1', '-DRAND_PLATFORM_INDEPENDENT=1', "-Isrc/'"]
# WORD_SIZE is: 8
# POINTER_SIZE is: 8
# LONGDOUBLE_SIZE is: 16
#
import ctypes
c_int128 = ctypes.c_ubyte*16
c_uint128 = c_int128
void = None
if ctypes.sizeof(ctypes.c_longdouble) == 16:
c_long_double_t = ctypes.c_longdouble
else:
c_long_double_t = ctypes.c_ubyte*16
# if local wordsize is same as target, keep ctypes pointer function.
if ctypes.sizeof(ctypes.c_void_p) == 8:
POINTER_T = ctypes.POINTER
else:
# required to access _ctypes
import _ctypes
    # Emulate a pointer class using the appropriate c_int32/c_int64 type.
    # The new class should have:
    # ['__module__', 'from_param', '_type_', '__dict__', '__weakref__', '__doc__']
    # but each base type should map to a single cached pointer class,
    # so that if A == B, POINTER_T(A) == POINTER_T(B)
ctypes._pointer_t_type_cache = {}
def POINTER_T(pointee):
# a pointer should have the same length as LONG
fake_ptr_base_type = ctypes.c_uint64
# specific case for c_void_p
if pointee is None: # VOID pointer type. c_void_p.
pointee = type(None) # ctypes.c_void_p # ctypes.c_ulong
clsname = 'c_void'
else:
clsname = pointee.__name__
if clsname in ctypes._pointer_t_type_cache:
return ctypes._pointer_t_type_cache[clsname]
# make template
class _T(_ctypes._SimpleCData,):
_type_ = 'L'
_subtype_ = pointee
def _sub_addr_(self):
return self.value
def __repr__(self):
return '%s(%d)'%(clsname, self.value)
def contents(self):
raise TypeError('This is not a ctypes pointer.')
def __init__(self, **args):
                raise TypeError('This is not a ctypes pointer. It is not instantiable.')
_class = type('LP_%d_%s'%(8, clsname), (_T,),{})
ctypes._pointer_t_type_cache[clsname] = _class
return _class
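# Illustrative sketch (comment only, not part of the generated bindings): the
# emulated POINTER_T caches one class per pointee type, so repeated calls with
# the same base type return the same class object, e.g.
#   LP_ubyte_a = POINTER_T(ctypes.c_ubyte)
#   LP_ubyte_b = POINTER_T(ctypes.c_ubyte)
#   assert LP_ubyte_a is LP_ubyte_b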
uint32_t = ctypes.c_uint32
uint64_t = ctypes.c_uint64
uint8_t = ctypes.c_uint8
int32_t = ctypes.c_int32
int64_t = ctypes.c_int64
size_t = ctypes.c_uint64
class union_c__UA_aes_inf(ctypes.Union):
_pack_ = True # source:False
_fields_ = [
('l', ctypes.c_uint32),
('b', ctypes.c_ubyte * 4),
]
aes_inf = union_c__UA_aes_inf
class struct_c__SA_aes_encrypt_ctx(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('ks', ctypes.c_uint32 * 60),
('inf', aes_inf),
('PADDING_0', ctypes.c_ubyte * 12),
]
aes_encrypt_ctx = struct_c__SA_aes_encrypt_ctx
class struct_c__SA_aes_decrypt_ctx(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('ks', ctypes.c_uint32 * 60),
('inf', aes_inf),
('PADDING_0', ctypes.c_ubyte * 12),
]
aes_decrypt_ctx = struct_c__SA_aes_decrypt_ctx
cbuf_inc = ctypes.CFUNCTYPE(None, POINTER_T(ctypes.c_ubyte))
t_rc = None # Variable ctypes.c_int32
BASE32_ALPHABET_RFC4648 = None # Variable POINTER_T(ctypes.c_char)
b58digits_ordered = [] # Variable ctypes.c_char * 0
b58digits_map = ctypes.c_byte * 0 # Variable ctypes.c_byte * 0
class struct_c__SA_bignum256(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('val', ctypes.c_uint32 * 9),
]
bignum256 = struct_c__SA_bignum256
class struct_c__SA_curve_info(ctypes.Structure):
pass
# values for enumeration 'c__EA_HasherType'
HASHER_SHA2 = 0
HASHER_SHA2D = 1
HASHER_SHA2_RIPEMD = 2
HASHER_SHA3 = 3
HASHER_SHA3K = 4
HASHER_BLAKE = 5
HASHER_BLAKED = 6
HASHER_BLAKE_RIPEMD = 7
HASHER_GROESTLD_TRUNC = 8
HASHER_OVERWINTER_PREVOUTS = 9
HASHER_OVERWINTER_SEQUENCE = 10
HASHER_OVERWINTER_OUTPUTS = 11
HASHER_OVERWINTER_PREIMAGE = 12
c__EA_HasherType = ctypes.c_int # enum
HasherType = c__EA_HasherType
class struct_c__SA_ecdsa_curve(ctypes.Structure):
pass
class struct_c__SA_curve_point(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('x', bignum256),
('y', bignum256),
]
curve_point = struct_c__SA_curve_point
struct_c__SA_ecdsa_curve._pack_ = True # source:False
struct_c__SA_ecdsa_curve._fields_ = [
('prime', bignum256),
('G', curve_point),
('order', bignum256),
('order_half', bignum256),
('a', ctypes.c_int32),
('b', bignum256),
('cp', struct_c__SA_curve_point * 8 * 64),
]
struct_c__SA_curve_info._pack_ = True # source:False
struct_c__SA_curve_info._fields_ = [
('bip32_name', POINTER_T(ctypes.c_char)),
('params', POINTER_T(struct_c__SA_ecdsa_curve)),
('hasher_base58', HasherType),
('hasher_sign', HasherType),
('hasher_pubkey', HasherType),
('PADDING_0', ctypes.c_ubyte * 4),
]
curve_info = struct_c__SA_curve_info
class struct_c__SA_HDNode(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('depth', ctypes.c_uint32),
('child_num', ctypes.c_uint32),
('chain_code', ctypes.c_ubyte * 32),
('private_key', ctypes.c_ubyte * 32),
('private_key_extension', ctypes.c_ubyte * 32),
('public_key', ctypes.c_ubyte * 33),
('PADDING_0', ctypes.c_ubyte * 7),
('curve', POINTER_T(struct_c__SA_curve_info)),
]
HDNode = struct_c__SA_HDNode
wordlist = POINTER_T(ctypes.c_char) * 2049 # Variable POINTER_T(ctypes.c_char) * 2049
class struct_c__SA_BLAKE256_CTX(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('h', ctypes.c_uint32 * 8),
('s', ctypes.c_uint32 * 4),
('t', ctypes.c_uint32 * 2),
('buflen', ctypes.c_uint64),
('nullt', ctypes.c_ubyte),
('buf', ctypes.c_ubyte * 64),
('PADDING_0', ctypes.c_ubyte * 7),
]
BLAKE256_CTX = struct_c__SA_BLAKE256_CTX
# values for enumeration 'blake2b_constant'
BLAKE2B_BLOCKBYTES = 128
BLAKE2B_OUTBYTES = 64
BLAKE2B_KEYBYTES = 64
BLAKE2B_SALTBYTES = 16
BLAKE2B_PERSONALBYTES = 16
blake2b_constant = ctypes.c_int # enum
class struct___blake2b_state(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('h', ctypes.c_uint64 * 8),
('t', ctypes.c_uint64 * 2),
('f', ctypes.c_uint64 * 2),
('buf', ctypes.c_ubyte * 128),
('buflen', ctypes.c_uint64),
('outlen', ctypes.c_uint64),
('last_node', ctypes.c_ubyte),
('PADDING_0', ctypes.c_ubyte * 7),
]
blake2b_state = struct___blake2b_state
# values for enumeration 'blake2s_constant'
BLAKE2S_BLOCKBYTES = 64
BLAKE2S_OUTBYTES = 32
BLAKE2S_KEYBYTES = 32
BLAKE2S_SALTBYTES = 8
BLAKE2S_PERSONALBYTES = 8
blake2s_constant = ctypes.c_int # enum
class struct___blake2s_state(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('h', ctypes.c_uint32 * 8),
('t', ctypes.c_uint32 * 2),
('f', ctypes.c_uint32 * 2),
('buf', ctypes.c_ubyte * 64),
('buflen', ctypes.c_uint32),
('outlen', ctypes.c_ubyte),
('last_node', ctypes.c_ubyte),
('PADDING_0', ctypes.c_ubyte * 2),
]
blake2s_state = struct___blake2s_state
class struct_c__SA_chacha20poly1305_ctx(ctypes.Structure):
pass
class struct_c__SA_ECRYPT_ctx(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('input', ctypes.c_uint32 * 16),
]
ECRYPT_ctx = struct_c__SA_ECRYPT_ctx
class struct_poly1305_context(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('aligner', ctypes.c_uint64),
('opaque', ctypes.c_ubyte * 136),
]
poly1305_context = struct_poly1305_context
struct_c__SA_chacha20poly1305_ctx._pack_ = True # source:False
struct_c__SA_chacha20poly1305_ctx._fields_ = [
('chacha20', ECRYPT_ctx),
('poly1305', poly1305_context),
]
chacha20poly1305_ctx = struct_c__SA_chacha20poly1305_ctx
s8 = ctypes.c_byte
u8 = ctypes.c_ubyte
s16 = ctypes.c_int16
u16 = ctypes.c_uint16
s32 = ctypes.c_int32
u32 = ctypes.c_uint32
s64 = ctypes.c_int64
u64 = ctypes.c_uint64
class struct_poly1305_state_internal_t(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
poly1305_state_internal_t = struct_poly1305_state_internal_t
SECP256K1_NAME = [] # Variable ctypes.c_char * 0
SECP256K1_DECRED_NAME = [] # Variable ctypes.c_char * 0
SECP256K1_GROESTL_NAME = [] # Variable ctypes.c_char * 0
NIST256P1_NAME = [] # Variable ctypes.c_char * 0
ED25519_NAME = [] # Variable ctypes.c_char * 0
ED25519_CARDANO_NAME = [] # Variable ctypes.c_char * 0
ED25519_SHA3_NAME = [] # Variable ctypes.c_char * 0
ED25519_KECCAK_NAME = [] # Variable ctypes.c_char * 0
CURVE25519_NAME = [] # Variable ctypes.c_char * 0
ecdsa_curve = struct_c__SA_ecdsa_curve
bignum25519 = ctypes.c_uint32 * 10
hash_512bits = ctypes.c_ubyte * 64
class struct_ge25519_t(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('x', ctypes.c_uint32 * 10),
('y', ctypes.c_uint32 * 10),
('z', ctypes.c_uint32 * 10),
('t', ctypes.c_uint32 * 10),
]
ge25519 = struct_ge25519_t
class struct_ge25519_p1p1_t(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('x', ctypes.c_uint32 * 10),
('y', ctypes.c_uint32 * 10),
('z', ctypes.c_uint32 * 10),
('t', ctypes.c_uint32 * 10),
]
ge25519_p1p1 = struct_ge25519_p1p1_t
class struct_ge25519_niels_t(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('ysubx', ctypes.c_uint32 * 10),
('xaddy', ctypes.c_uint32 * 10),
('t2d', ctypes.c_uint32 * 10),
]
ge25519_niels = struct_ge25519_niels_t
class struct_ge25519_pniels_t(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('ysubx', ctypes.c_uint32 * 10),
('xaddy', ctypes.c_uint32 * 10),
('z', ctypes.c_uint32 * 10),
('t2d', ctypes.c_uint32 * 10),
]
ge25519_pniels = struct_ge25519_pniels_t
ed25519_signature = ctypes.c_ubyte * 64
ed25519_public_key = ctypes.c_ubyte * 32
ed25519_secret_key = ctypes.c_ubyte * 32
bignum256modm_element_t = ctypes.c_uint32
bignum256modm = ctypes.c_uint32 * 9
class struct_c_groestlDOTh_S_groestlDOTh_2155(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('PADDING_0', ctypes.c_ubyte),
]
class struct_c__SA_sph_groestl_big_context(ctypes.Structure):
pass
class union_c__SA_sph_groestl_big_context_0(ctypes.Union):
_pack_ = True # source:False
_fields_ = [
('wide', ctypes.c_uint64 * 16),
('narrow', ctypes.c_uint32 * 32),
]
struct_c__SA_sph_groestl_big_context._pack_ = True # source:False
struct_c__SA_sph_groestl_big_context._fields_ = [
('buf', ctypes.c_ubyte * 128),
('ptr', ctypes.c_uint64),
('state', union_c__SA_sph_groestl_big_context_0),
('count', ctypes.c_uint64),
]
sph_groestl_big_context = struct_c__SA_sph_groestl_big_context
GROESTL512_CTX = struct_c__SA_sph_groestl_big_context
sph_u32 = ctypes.c_uint32
sph_s32 = ctypes.c_int32
sph_u64 = ctypes.c_uint64
sph_s64 = ctypes.c_int64
class struct_c__SA_Hasher(ctypes.Structure):
pass
class union_c__SA_Hasher_0(ctypes.Union):
pass
class struct_SHA3_CTX(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('hash', ctypes.c_uint64 * 25),
('message', ctypes.c_uint64 * 24),
('rest', ctypes.c_uint32),
('block_size', ctypes.c_uint32),
]
SHA3_CTX = struct_SHA3_CTX
class struct__SHA256_CTX(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('state', ctypes.c_uint32 * 8),
('bitcount', ctypes.c_uint64),
('buffer', ctypes.c_uint32 * 16),
]
SHA256_CTX = struct__SHA256_CTX
union_c__SA_Hasher_0._pack_ = True # source:False
union_c__SA_Hasher_0._fields_ = [
('sha2', SHA256_CTX),
('sha3', SHA3_CTX),
('blake', BLAKE256_CTX),
('groestl', GROESTL512_CTX),
('blake2b', blake2b_state),
('PADDING_0', ctypes.c_ubyte * 152),
]
struct_c__SA_Hasher._pack_ = True # source:False
struct_c__SA_Hasher._fields_ = [
('type', HasherType),
('PADDING_0', ctypes.c_ubyte * 4),
('ctx', union_c__SA_Hasher_0),
]
Hasher = struct_c__SA_Hasher
class struct__HMAC_SHA256_CTX(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('o_key_pad', ctypes.c_ubyte * 64),
('ctx', SHA256_CTX),
]
HMAC_SHA256_CTX = struct__HMAC_SHA256_CTX
class struct__HMAC_SHA512_CTX(ctypes.Structure):
pass
class struct__SHA512_CTX(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('state', ctypes.c_uint64 * 8),
('bitcount', ctypes.c_uint64 * 2),
('buffer', ctypes.c_uint64 * 16),
]
SHA512_CTX = struct__SHA512_CTX
struct__HMAC_SHA512_CTX._pack_ = True # source:False
struct__HMAC_SHA512_CTX._fields_ = [
('o_key_pad', ctypes.c_ubyte * 128),
('ctx', SHA512_CTX),
]
HMAC_SHA512_CTX = struct__HMAC_SHA512_CTX
xmr_amount = ctypes.c_uint64
xmr_key64_t = ctypes.c_ubyte * 32 * 64
class struct_xmr_boro_sig(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('s0', ctypes.c_ubyte * 32 * 64),
('s1', ctypes.c_ubyte * 32 * 64),
('ee', ctypes.c_ubyte * 32),
]
xmr_boro_sig_t = struct_xmr_boro_sig
class struct_range_sig(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('asig', xmr_boro_sig_t),
('Ci', ctypes.c_ubyte * 32 * 64),
]
xmr_range_sig_t = struct_range_sig
xmr_key_t = ctypes.c_ubyte * 32
class struct_xmr_ctkey(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('dest', ctypes.c_ubyte * 32),
('mask', ctypes.c_ubyte * 32),
]
xmr_ctkey_t = struct_xmr_ctkey
class struct_c__SA_nem_transaction_ctx(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('public_key', ctypes.c_ubyte * 32),
('buffer', POINTER_T(ctypes.c_ubyte)),
('offset', ctypes.c_uint64),
('size', ctypes.c_uint64),
]
nem_transaction_ctx = struct_c__SA_nem_transaction_ctx
nist256p1 = struct_c__SA_ecdsa_curve # Variable struct_c__SA_ecdsa_curve
nist256p1_info = struct_c__SA_curve_info # Variable struct_c__SA_curve_info
class struct__PBKDF2_HMAC_SHA256_CTX(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('odig', ctypes.c_uint32 * 8),
('idig', ctypes.c_uint32 * 8),
('f', ctypes.c_uint32 * 8),
('g', ctypes.c_uint32 * 16),
('first', ctypes.c_char),
('PADDING_0', ctypes.c_ubyte * 3),
]
PBKDF2_HMAC_SHA256_CTX = struct__PBKDF2_HMAC_SHA256_CTX
class struct__PBKDF2_HMAC_SHA512_CTX(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('odig', ctypes.c_uint64 * 8),
('idig', ctypes.c_uint64 * | |
if pdb not in pdct[uni]:
pdct[uni].append(pdb)
except KeyError:
pdct[uni] = [pdb]
pdct_rev[pdb] = uni
targets = []
with open(prots, 'r') as unisf:
for lp in unisf:
prot = lp.strip()
targets.append(prot)
#pdct_rev[prot] = lp.strip().upper()
try:
targets += pdct[lp.strip().upper()]
except KeyError:
pass
return targets, pdct_rev
def generate_similar_sigs(self, cmpd, sort=False, proteins=[], aux=False):
"""!
For a given compound, generate the similar compounds using distance of sigs.
@param cmpd object: Compound object
@param sort bool: Sort the list of similar compounds
@param proteins list: Protein objects to identify a subset of the Compound signature
@param aux bool: Use an auxiliary signature (default: False)
@return Returns list: Similar Compounds to the given Compound
"""
# find index of query compound, collect signatures for both
q = 0
c_sig = []
if proteins is None:
c_sig = cmpd.sig
elif proteins:
for pro in proteins:
index = self.protein_id_to_index[pro.id_]
c_sig.append(cmpd.sig[index])
else:
if aux:
c_sig = cmpd.aux_sig
else:
c_sig = cmpd.sig
ca = np.array([c_sig])
other_sigs = []
for ci in range(len(self.compounds)):
c = self.compounds[ci]
if cmpd.id_ == c.id_:
q = ci
other = []
if proteins is None:
other_sigs.append(c.sig)
elif proteins:
for pro in proteins:
index = self.protein_id_to_index[pro.id_]
other.append(c.sig[index])
other_sigs.append(other)
else:
if aux:
other_sigs.append(c.aux_sig)
else:
other_sigs.append(c.sig)
oa = np.array(other_sigs)
# call cdist, speed up with custom RMSD function
if self.dist_metric == "rmsd":
distances = pairwise_distances(ca, oa, lambda u, v: np.sqrt(np.mean((u - v) ** 2)), n_jobs=self.ncpus)
elif self.dist_metric in ['cosine', 'correlation', 'euclidean', 'cityblock']:
distances = pairwise_distances(ca, oa, self.dist_metric, n_jobs=self.ncpus)
else:
print("Incorrect distance metric - {}".format(self.dist_metric))
cmpd.similar = []
# step through the cdist list - add RMSDs to Compound.similar list
n = len(self.compounds)
for i in range(n):
c2 = self.compounds[i]
if i == q:
continue
d = distances[0][i]
cmpd.similar.append((c2, d))
n += 1
if sort:
sorted_scores = sorted(cmpd.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
cmpd.similar = sorted_scores
cmpd.similar_computed = True
cmpd.similar_sorted = True
return sorted_scores
else:
cmpd.similar_computed = True
return cmpd.similar
def generate_similar_sigs_cp(self, cmpd_pair, sort=False, proteins=[], aux=False):
"""!
For a given compound pair, generate the similar compound pairs using distance of sigs.
@param cmpd_pair object: Compound_pair object
@param sort bool: Sort the list of similar compounds
@param proteins list: Protein objects to identify a subset of the Compound signature
@param aux bool: Use an auxiliary signature (default: False)
@return Returns list: Similar Compounds to the given Compound
"""
# find index of query compound, collect signatures for both
q = 0
cp_sig = []
if proteins is None:
cp_sig = cmpd_pair.sig
elif proteins:
for pro in proteins:
index = self.protein_id_to_index[pro.id_]
cp_sig.append(cmpd_pair.sig[index])
else:
if aux:
cp_sig = cmpd_pair.aux_sig
else:
cp_sig = cmpd_pair.sig
ca = np.array([cp_sig])
other_sigs = []
for ci in range(len(self.compound_pairs)):
cp = self.compound_pairs[ci]
if cmpd_pair.id_ == cp.id_:
q = ci
other = []
if proteins is None:
other_sigs.append(cp.sig)
elif proteins:
for pro in proteins:
index = self.protein_id_to_index[pro.id_]
other.append(cp.sig[index])
other_sigs.append(other)
else:
if aux:
other_sigs.append(cp.aux_sig)
else:
other_sigs.append(cp.sig)
oa = np.array(other_sigs)
# call cdist, speed up with custom RMSD function
if self.dist_metric == "rmsd":
distances = pairwise_distances(ca, oa, lambda u, v: np.sqrt(np.mean((u - v) ** 2)), n_jobs=self.ncpus)
elif self.dist_metric in ['cosine', 'correlation', 'euclidean', 'cityblock']:
distances = pairwise_distances(ca, oa, self.dist_metric, n_jobs=self.ncpus)
else:
print("Incorrect distance metric - {}".format(self.dist_metric))
cmpd_pair.similar = []
# step through the cdist list - add RMSDs to Compound.similar list
n = len(self.compound_pairs)
for i in range(n):
c2 = self.compound_pairs[i]
if i == q:
continue
d = distances[0][i]
cmpd_pair.similar.append((c2, d))
n += 1
if sort:
sorted_scores = sorted(cmpd_pair.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
cmpd_pair.similar = sorted_scores
cmpd_pair.similar_computed = True
cmpd_pair.similar_sorted = True
return sorted_scores
else:
cmpd_pair.similar_computed = True
return cmpd_pair.similar
def generate_some_similar_sigs(self, cmpds, sort=False, proteins=[], aux=False):
"""!
        For a given list of compounds, generate the similar compounds based on distance of signatures.
        This is the method used for pathways/genes, for all intents and purposes.
@param cmpds list: Compound objects
@param sort bool: Sort similar compounds for each Compound
@param proteins list: Protein objects to identify a subset of the Compound signature
@param aux bool: Use an auxiliary signature (default: False)
@return Returns list: Similar Compounds to the given Compound
"""
q = [cmpd.id_ for cmpd in cmpds]
if proteins is None:
ca = [cmpd.sig for cmpd in cmpds]
oa = [cmpd.sig for cmpd in self.compounds]
elif proteins:
index = [self.protein_id_to_index[pro.id_] for pro in proteins]
ca = [[cmpd.sig[i] for i in index] for cmpd in cmpds]
oa = [[cmpd.sig[i] for i in index] for cmpd in self.compounds]
else:
if aux:
ca = [cmpd.aux_sig for cmpd in cmpds]
oa = [cmpd.aux_sig for cmpd in self.compounds]
else:
ca = [cmpd.sig for cmpd in cmpds]
oa = [cmpd.sig for cmpd in self.compounds]
ca = np.asarray(ca)
oa = np.asarray(oa)
# call cdist, speed up with custom RMSD function
if self.dist_metric == "rmsd":
distances = pairwise_distances(ca, oa, lambda u, v: np.sqrt(np.mean((u - v) ** 2)), n_jobs=self.ncpus)
elif self.dist_metric in ['cosine', 'correlation', 'euclidean', 'cityblock']:
distances = pairwise_distances(ca, oa, self.dist_metric, n_jobs=self.ncpus)
else:
print("Incorrect distance metric - {}".format(self.dist_metric))
# step through the cdist list - add RMSDs to Compound.similar list
n = len(self.compounds)
for j in range(len(cmpds)):
cmpds[j].similar = []
for i in range(n):
c2 = self.compounds[i]
id2 = c2.id_
if id2 == q[j]:
continue
d = distances[j][i]
cmpds[j].similar.append((c2, d))
if sort:
sorted_scores = sorted(cmpds[j].similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)
cmpds[j].similar = sorted_scores
cmpds[j].similar_computed = True
cmpds[j].similar_sorted = True
else:
cmpds[j].similar_computed = True
def quantify_pathways(self, indication=None):
"""!
Uses the pathway quantifier defined in the CANDO instantiation to make a
pathway signature for all pathways in the input file (NOTE: does not compute distances)
@param indication object: Indication object
@return Returns None
"""
pq = self.pathway_quantifier
if pq == 'max':
func = max
elif pq == 'sum':
func = sum
elif pq == 'avg':
func = np.average
elif pq == 'proteins':
if not self.indication_pathways:
print('Pathway quantifier "proteins" should only be used in combination with a '
'pathway-disease mapping (indication_pathways), quitting.')
quit()
func = None
else:
print('Please enter a proper pathway quantify method, quitting.')
func = None
quit()
# this is a recursive function for checking if the pathways have proteins
def check_proteins(paths):
pl = [] # list of pathways with >1 protein
n = 0
for path in paths:
if len(path.proteins) > 0:
pl.append(path)
n += 1
if n > 0:
return pl
else:
print('The associated pathways for this indication ({}) do not have enough proteins, '
'using all pathways'.format(indication.id_))
return check_proteins(self.pathways)
if indication:
if len(indication.pathways) == 0:
print('Warning: {} does not have any associated pathways - using all pathways'.format(indication.name))
pws = self.pathways
else:
pws = check_proteins(indication.pathways)
else:
pws = check_proteins(self.pathways)
for ci in range(len(self.compounds)):
pw_sig_all = []
c = self.compounds[ci]
for pw in pws:
if len(pw.proteins) == 0:
print('No associated proteins for pathway {}, skipping'.format(pw.id_))
continue
pw_sig = []
for p in pw.proteins:
ch = p.id_
ch_i = self.protein_id_to_index[ch]
pw_sig.append(c.sig[ch_i])
if pq == 'proteins':
pw_sig_all += pw_sig
else:
pw_sig_all.append(pw_sig)
if pq != 'proteins':
c.aux_sig = list(map(func, pw_sig_all))
else:
c.aux_sig = pw_sig_all
def results_analysed(self, f, metrics, effect_type):
"""!
Creates the results analysed named file for the benchmarking and
computes final avg indication accuracies
@param f str: File path for results analysed named
@param metrics list: Cutoffs used for the benchmarking protocol
@param effect_type str: Defines the effect as either an Indication (disease) or ADR (adverse reaction)
@return Returns dct: dict of accuracies at each cutoff
"""
fo = open(f, 'w')
effects = list(self.accuracies.keys())
# Write header
fo.write("{0}_id\tcmpds_per_{0}\ttop10\ttop25\ttop50\ttop100\ttopAll\ttop1%\t"
"top5%\ttop10%\ttop50%\ttop100%\t{0}_name\n".format(effect_type))
effects_sorted = sorted(effects, key=lambda x: (len(x[0].compounds), x[0].id_))[::-1]
l = len(effects)
final_accs = {}
for m in metrics:
final_accs[m] = 0.0
for effect, c in effects_sorted:
fo.write("{0}\t{1}\t".format(effect.id_, c))
accs = self.accuracies[(effect, c)]
for m in metrics:
n = accs[m]
y = str(n / c * 100)[0:4]
fo.write("{}\t".format(y))
final_accs[m] += n / c / l
fo.write("{}\n".format(effect.name))
fo.close()
return final_accs
def canbenchmark(self, file_name, indications=[], continuous=False, bottom=False,
ranking='standard', adrs=False):
"""!
Benchmarks the platform based on compound similarity of those approved | |
# -*- coding: utf-8 -*-
# Licensed under a MIT style license - see LICENSE.rst
""" Access spectroscopic data for a single BOSS target.
"""
from __future__ import division, print_function
from six import binary_type
import re
import numpy as np
import numpy.ma
import fitsio
import astropy.table
import bossdata.raw
def get_fiducial_pixel_index(wavelength):
"""
Convert a wavelength to a fiducial pixel index.
The fiducial wavelength grid used by all SDSS co-added spectra is
logarithmically spaced::
wavelength = wavelength0 * 10**(coef * index)
The value ``coef = 1e-4`` is encoded in the FITS HDU headers of SDSS
coadded data files with the keyword ``CD1_1`` (and sometimes also
``COEFF1``). The value of ``wavelength0`` defines ``index = 0`` and is
similarly encoded as ``CRVAL1`` (and sometimes also ``COEFF0``). However,
its value is not constant between different SDSS co-added spectra because
varying amounts of invalid data are trimmed. This function adopts the
constant value 3500.26 Angstrom corresponding to ``index = 0``:
>>> get_fiducial_pixel_index(3500.26)
0.0
Note that the return value is a float so that wavelengths not on the
fiducial grid can be converted and detected:
>>> get_fiducial_pixel_index(3500.5)
0.29776960129179741
The calculation is automatically broadcast over an input wavelength array:
>>> wlen = np.arange(4000,4400,100)
>>> get_fiducial_pixel_index(wlen)
array([ 579.596863 , 686.83551692, 791.4898537 , 893.68150552])
Use :attr:`fiducial_pixel_index_range` for an index range that covers all
SDSS spectra and :attr:`fiducial_loglam` to covert integer indices to
wavelengths.
Args:
wavelength(float): Input wavelength in Angstroms.
Returns:
numpy.ndarray: Array of floating-point indices relative to the fiducial
wavelength grid.
"""
return (np.log10(wavelength) - _fiducial_log10lam0)/_fiducial_coef
_fiducial_coef = 1e-4
_fiducial_log10lam0 = np.log10(3500.26)
fiducial_pixel_index_range = (0, 4800)
"""
Range of fiducial pixel indices that covers all spectra.
Use :func:`get_fiducial_pixel_index` to calculate fiducial pixel indices.
"""
fiducial_loglam = (_fiducial_log10lam0 +
_fiducial_coef * np.arange(*fiducial_pixel_index_range))
"""
Array of fiducial log10(wavelength in Angstroms) covering all spectra.
Lookup the log10(wavelength) or wavelength corresponding to a particular
integral pixel index using:
>>> fiducial_loglam[100]
3.554100305027835
>>> 10**fiducial_loglam[100]
3581.7915291606305
The bounding wavelengths of this range are:
>>> 10**fiducial_loglam[[0,-1]]
array([ 3500.26 , 10568.18251472])
The :meth:`SpecFile.get_valid_data` and :meth:`PlateFile.get_valid_data()
<bossdata.plate.PlateFile.get_valid_data>` methods provide a ``fiducial_grid``
option that returns data using this grid.
"""
class Exposures(object):
"""Table of exposure info extracted from FITS header keywords.
Parse the NEXP and EXPIDnn keywords that are present in the header of HDU0
in :datamodel:`spPlate <PLATE4/spPlate>` and :datamodel:`spec
<spectra/PLATE4/spec>` FITS files.
The constructor initializes the ``table`` attribute with column names
``offset``, ``camera``, ``science``, ``flat`` and ``arc``, and creates one
row for each keyword EXPIDnn, where ``offset`` equals the keyword sequence
number nn, ``camera`` is one of b1, b2, r1, r2, and the remaining columns
record the science and calibration exposure numbers.
Use :meth:`get_info` to retrieve the n-th exposure for a particular camera
(b1, b2, r1, r2). Note that when this class is initialized from a
:datamodel:`spec file <spectra/PLATE4/spec>` header, it will only describe
the two cameras of a single spectrograph (b1+r1 or b2+r2). The `num_by_camera`
attribute is a dictionary of ints indexed by camera that records the number
of science exposures available for that camera.
Args:
header(dict): dictionary of FITS header keyword, value pairs.
Returns:
"""
def __init__(self, header):
num_exposures = header['NEXP']
expid_pattern = re.compile('([br][12])-([0-9]{8})-([0-9]{8})-([0-9]{8})')
exposure_set = set()
self.table = astropy.table.Table(
names=('offset', 'camera', 'science', 'flat', 'arc'),
dtype=('i4', 'S2', 'i4', 'i4', 'i4'))
self.num_by_camera = dict(b1=0, b2=0, r1=0, r2=0)
for i in range(num_exposures):
camera, science_num, flat_num, arc_num = expid_pattern.match(
header['EXPID{0:02d}'.format(i + 1)]).groups()
self.table.add_row((i, camera, int(science_num), int(flat_num), int(arc_num)))
exposure_set.add(int(science_num))
self.num_by_camera[camera] += 1
self.sequence = sorted(exposure_set)
# Check that the science exposures listed for each camera are self consistent.
num_exposures = len(self.sequence)
for camera in ('b1', 'b2', 'r1', 'r2'):
if self.num_by_camera[camera] == 0:
continue
if self.num_by_camera[camera] != num_exposures:
raise RuntimeError('Found {} {} exposures but expected {}.'.format(
self.num_by_camera[camera], camera, num_exposures))
# Conversion to binary_type is needed for backwards compatibility with
# astropy < 2.0 and python 3. For details, see:
# http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-python-3
camera_rows = self.table['camera'] == binary_type(camera, 'ascii')
camera_exposures = set(self.table[camera_rows]['science'])
if camera_exposures != exposure_set:
raise RuntimeError('Found inconsistent {} exposures: {}. Expected: {}.'.format(
camera, camera_exposures, exposure_set))
def get_info(self, exposure_index, camera):
"""Get information about a single camera exposure.
Args:
exposure_index(int): The sequence number for the requested camera
exposure, in the range 0 - `(num_exposures[camera]-1)`.
camera(str): One of b1,b2,r1,r2.
Returns:
A structured array with information about the requested exposure,
corresponding to one row of our ``table`` attribute.
Raises:
ValueError: Invalid exposure_index or camera.
RuntimeError: Exposure not present.
"""
if camera not in ('b1', 'b2', 'r1', 'r2'):
raise ValueError(
'Invalid camera "{}", expected b1, b2, r1, or r2.'.format(camera))
if self.num_by_camera[camera] == 0:
raise ValueError('There are no {} exposures available.'.format(camera))
if exposure_index < 0 or exposure_index >= self.num_by_camera[camera]:
raise ValueError('Invalid exposure_index {}, expected 0-{}.'.format(
exposure_index, self.num_by_camera[camera] - 1))
science_num = self.sequence[exposure_index]
row = (self.table['science'] == science_num) & (
self.table['camera'] == binary_type(camera, 'ascii'))
if not np.any(row):
# This should never happen after our self-consistency checks in the ctor.
raise RuntimeError('No exposure[{}] = {:08d} found for {}.'.format(
exposure_index, science_num, camera))
if np.count_nonzero(row) > 1:
# This should never happen after our self-consistency checks in the ctor.
raise RuntimeError(
'Found multiple {} exposures[{}].'.format(camera, exposure_index))
return self.table[row][0]
def get_exposure_name(self, exposure_index, camera, ftype='spCFrame'):
"""Get the file name of a single science or calibration exposure data product.
Use the exposure name to locate FITS data files associated with
individual exposures. The supported file types are:
:datamodel:`spCFrame <PLATE4/spCFrame>`,
:datamodel:`spFrame <PLATE4/spFrame>`,
:datamodel:`spFluxcalib <PLATE4/spFluxcalib>`
:datamodel:`spFluxcorr <PLATE4/spFluxcorr>`,
:datamodel:`spArc <PLATE4/spArc>`,
:datamodel:`spFlat <PLATE4/spFlat>`. This method is analogous to
:meth:`bossdata.plate.Plan.get_exposure_name`, but operates for a single
target and only knows about exposures actually used in the final co-add
(including the associated arc and flat exposures).
Args:
exposure_index(int): The sequence number for the requested camera
exposure, in the range 0 - `(num_exposures[camera]-1)`.
camera(str): One of b1,b2,r1,r2.
ftype(str): Type of exposure file whose name to return. Must be one of
spCFrame, spFrame, spFluxcalib, spFluxcorr, spArc, spFlat. An spCFrame
is assumed to be uncompressed, and all other files are assumed to be
compressed. When a calibration is requested (spArc, spFlat) results from
the calibration exposure used to analyze the specified science exposure
is returned.
Returns:
str: Exposure name of the form [ftype]-[cc]-[eeeeeeee].[ext] where [cc]
identifies the camera (one of b1,r1,b2,r2) and [eeeeeeee] is the
zero-padded arc/flat/science exposure number. The extension [ext]
is "fits" for spCFrame files and "fits.gz" for all other file types.
Raises:
ValueError: one of the inputs is invalid.
"""
if camera not in ('b1', 'b2', 'r1', 'r2'):
raise ValueError(
'Invalid camera "{}", expected b1, b2, r1, or r2.'.format(camera))
if exposure_index < 0 or exposure_index >= self.num_by_camera[camera]:
raise ValueError('Invalid exposure_index {}, expected 0-{}.'.format(
exposure_index, self.num_by_camera[camera] - 1))
ftypes = ('spCFrame', 'spFrame', 'spFluxcalib', 'spFluxcorr', 'spArc', 'spFlat')
if ftype not in ftypes:
raise ValueError('Invalid file type ({}) must be one of: {}.'
.format(ftype, ', '.join(ftypes)))
# Get the science exposure ID number for the requested seqence number 0,1,...
exposure_info = self.get_info(exposure_index, camera)
if ftype == 'spArc':
exposure_id = exposure_info['arc']
elif ftype == 'spFlat':
exposure_id = exposure_info['flat']
else:
exposure_id = exposure_info['science']
name = '{0}-{1}-{2:08d}.fits'.format(ftype, camera, exposure_id)
if ftype != 'spCFrame':
name += '.gz'
return name
def get_raw_image(self, plate, mjd, exposure_index, camera, flavor='science',
finder=None, mirror=None):
"""Get the raw image file associated with an exposure.
Args:
plate(int): Plate number, which must be positive.
mjd(int): Modified Julian date of the observation, which must be > 45000.
exposure_index(int): The sequence number for the requested camera
exposure, in the range 0 - `(num_exposures[camera]-1)`.
camera(str): One of b1,b2,r1,r2.
flavor(str): One of science, arc, flat.
finder(bossdata.path.Finder): Object used to find the names of BOSS data files.
If not specified, the default Finder constructor is used.
mirror(bossdata.remote.Manager): Object used to interact with the local mirror
of BOSS data. If not specified, the default Manager constructor is used.
Returns:
bossdata.raw.RawImageFile: requested raw image file.
Raises:
ValueError: one of the inputs is invalid.
"""
if plate < 0:
raise ValueError('Invalid plate number ({}) must be > 0.'.format(plate))
if mjd <= 45000:
raise ValueError('Invalid mjd ({}) must be >= 45000.'.format(mjd))
if camera not in ('b1', 'b2', 'r1', 'r2'):
raise ValueError(
'Invalid camera "{}". Expected one of b1, b2, r1, r2.'.format(camera))
if exposure_index < 0 or exposure_index >= self.num_by_camera[camera]:
raise ValueError('Invalid exposure_index {}, expected 0-{}.'.format(
exposure_index, self.num_by_camera[camera] - 1))
if flavor not in | |
# openpnm/io/Statoil.py
import os
import numpy as np
from openpnm.topotools import trim, extend
from openpnm.utils import logging
from openpnm.io import GenericIO
from openpnm.network import GenericNetwork
from openpnm.geometry import GenericGeometry
import openpnm.models as mods
from pathlib import Path
from pandas import read_table, DataFrame
from tqdm import tqdm
logger = logging.getLogger(__name__)
class Statoil(GenericIO):
r"""
The StatOil format is used by the Maximal Ball network extraction code of
the Imperial College London group
This class can be used to load and work with those networks. Numerous
datasets are available for download from the group's
`website <http://tinyurl.com/zurko4q>`_.
The 'Statoil' format consists of 4 different files in a single
folder. The data is stored in columns with each corresponding to a
specific property. Headers are not provided in the files, so one must
refer to various theses and documents to interpret their meaning.
"""
@classmethod
def export_data(cls, network, shape, prefix=None, path=None, Pin=None, Pout=None):
r"""
Parameters
----------
network : OpenPNM Network object
The network
shape : array_like
An ndim-by-1 array or list containing the network dimensions
in physical units (i.e. um)
prefix : str
The prefix to append to each file name, such as
``<prefix>_node1.dat``. If not provided ``project.name`` is used.
path : str or path object
The location where the exported files should be stored. If not
provided the current working directory is used
Pinlet and Poutlet : scalar, int (optional)
The pore index of the inlet and outlet reservoir pores. If not
provided then it is assumed they are the second last and last
pores in the network, respectively. This would be the case if
the ``add_reservoir_pore`` function had been called prior to
exporting.
"""
if path is None:
path = os.getcwd()
p = Path(path)
if prefix is None:
prefix = network.project.name
# Deal with reservoir pores
if Pin is None:
Pin = network.Np - 2
if Pout is None:
Pout = network.Np - 1
Ptemp = network.find_neighbor_pores(pores=Pin)
inlets = np.zeros_like(network.Ps, dtype=bool)
inlets[Ptemp] = True
Ptemp = network.find_neighbor_pores(pores=Pout)
outlets = np.zeros_like(network.Ps, dtype=bool)
outlets[Ptemp] = True
# Write link 1 file
props = ['throat.conns',
'throat.diameter',
'throat.shape_factor',
'throat.total_length']
with open(p.joinpath(prefix + '_link1.dat'), 'wt') as f:
f.write(str(network.Nt) + '\n')
for row in tqdm(network.throats(), desc='Writing Link1 file'):
s = ''
s = s + '{:>9}'.format(str(row+1))
for col in props:
try:
val = network[col][row]
except KeyError:
val = 0.0
if col == 'throat.conns':
val = np.copy(val)
val[val == network.Np - 1] = -1
val[val == (network.Np - 2)] = -2
s = s + '{:>9}'.format(str(val[0] + 1))
s = s + '{:>9}'.format(str(val[1] + 1))
continue
if isinstance(val, float):
if 'diameter' in col: # Convert to radius
val = val/2
if np.isnan(val):
val = 0.0
val = np.format_float_scientific(val, precision=6,
exp_digits=3,
trim='k',
unique=False)
s = s + '{:>15}'.format(str(val))
s = s + '\n' # Remove trailing tab and add a new line
f.write(s)
# Write Link 2 file
props = ['throat.conns',
'throat.conduit_lengths.pore1',
'throat.conduit_lengths.pore2',
'throat.conduit_lengths.throat',
'throat.volume',
'throat.clay_volume']
with open(p.joinpath(prefix + '_link2.dat'), 'wt') as f:
for row in tqdm(network.throats(), desc='Writing Link2 file'):
s = ''
s = s + '{:>9}'.format(str(row+1))
for col in props:
try:
val = network[col][row]
except KeyError:
val = 0.0
if col == 'throat.conns':
val = np.copy(val)
val[val == network.Np - 1] = -1
val[val == (network.Np - 2)] = -2
# Original file has 7 spaces for pore indices, but
# this is not enough for networks with > 10 million
# pores so I have bumped it to 9. I'm not sure if
# this will still work with the ICL binaries.
s = s + '{:>9}'.format(str(val[0] + 1))
s = s + '{:>9}'.format(str(val[1] + 1))
continue
if isinstance(val, float):
if np.isnan(val):
val = 0.0
val = np.format_float_scientific(val, precision=6,
exp_digits=3,
trim='k',
unique=False)
s = s + '{:>15}'.format(str(val))
s = s + '\n' # Remove trailing tab and a new line
f.write(s)
# Write Node 1 file
with open(p.joinpath(prefix + '_node1.dat'), 'wt') as f:
s = ''
s = s + str(network.num_pores('reservoir', 'not'))
for d in shape:
val = np.format_float_scientific(d, precision=6, exp_digits=3,
trim='k', unique=False)
s = s + '{:>17}'.format(str(val))
s = s + '\n'
f.write(s)
for row in tqdm(network.pores('reservoir', mode='not'),
desc='Writing Node1 file'):
if row in [Pin, Pout]:
continue
s = ''
s = s + '{:>9}'.format(str(row+1))
for c in network['pore.coords'][row]:
if isinstance(c, float):
c = np.format_float_scientific(c, precision=6,
exp_digits=3,
trim='k',
unique=False)
s = s + '{:>15}'.format(str(c))
s = s + '{:>9}'.format(str(network.num_neighbors(row)[0]))
for n in network.find_neighbor_pores(row):
if n == Pin:
n = 0
elif n == Pout:
n = -1
else:
n = n + 1
s = s + '{:>9}'.format(str(n))
s = s + '{:>9}'.format(str(int(inlets[row])))
s = s + '{:>9}'.format(str(int(outlets[row])))
for n in network.find_neighbor_throats(row):
s = s + '{:>9}'.format(str(n + 1))
s = s + '\n' # Remove trailing tab and a new line
f.write(s)
# Write Node 2 file
props = ['pore.volume',
'pore.diameter',
'pore.shape_factor',
'pore.clay_volume']
with open(p.joinpath(prefix + '_node2.dat'), 'wt') as f:
for row in tqdm(network.pores('reservoir', mode='not'),
desc='Writing Node2 file'):
s = ''
s = s + '{:>9}'.format(str(row+1))
for col in props:
try:
val = network[col][row]
except KeyError:
val = 0.0
if isinstance(val, float):
if 'diameter' in col:
val = val/2
val = np.format_float_scientific(val, precision=6,
exp_digits=3,
trim='k',
unique=False)
s = s + '{:>15}'.format(str(val))
s = s + '\n' # Remove trailing tab and a new line
f.write(s)
@classmethod
def load(cls, *args, **kwargs):
r"""
This method is being deprecated. Use ``import_data`` instead.
"""
return cls.import_data(*args, **kwargs)
@classmethod
def import_data(cls, path, prefix, network=None):
r"""
Load data from the \'dat\' files located in specified folder.
Parameters
----------
path : string
The full path to the folder containing the set of \'dat\' files.
prefix : string
The file name prefix on each file. The data files are stored
as \<prefix\>_node1.dat.
network : OpenPNM Network Object
If given then the data will be loaded on it and returned. If not
given, a Network will be created and returned.
Returns
-------
An OpenPNM Project containing a GenericNetwork holding all the data
"""
net = {}
# Parse the link1 file
path = Path(path)
filename = Path(path.resolve(), prefix+'_link1.dat')
with open(filename, mode='r') as f:
link1 = read_table(filepath_or_buffer=f,
header=None,
skiprows=1,
sep=' ',
skipinitialspace=True,
index_col=0)
link1.columns = ['throat.pore1', 'throat.pore2', 'throat.radius',
'throat.shape_factor', 'throat.total_length']
# Add link1 props to net
net['throat.conns'] = np.vstack((link1['throat.pore1']-1,
link1['throat.pore2']-1)).T
net['throat.conns'] = np.sort(net['throat.conns'], axis=1)
net['throat.radius'] = np.array(link1['throat.radius'])
net['throat.shape_factor'] = np.array(link1['throat.shape_factor'])
net['throat.total_length'] = np.array(link1['throat.total_length'])
filename = Path(path.resolve(), prefix+'_link2.dat')
with open(filename, mode='r') as f:
link2 = read_table(filepath_or_buffer=f,
header=None,
sep=' ',
skipinitialspace=True,
index_col=0)
link2.columns = ['throat.pore1', 'throat.pore2',
'throat.pore1_length', 'throat.pore2_length',
'throat.length', 'throat.volume',
'throat.clay_volume']
# Add link2 props to net
cl_t = np.array(link2['throat.length'])
net['throat.length'] = cl_t
net['throat.conduit_lengths.throat'] = cl_t
net['throat.volume'] = np.array(link2['throat.volume'])
cl_p1 = np.array(link2['throat.pore1_length'])
net['throat.conduit_lengths.pore1'] = cl_p1
cl_p2 = np.array(link2['throat.pore2_length'])
net['throat.conduit_lengths.pore2'] = cl_p2
net['throat.clay_volume'] = np.array(link2['throat.clay_volume'])
# ---------------------------------------------------------------------
# Parse the node1 file
filename = Path(path.resolve(), prefix+'_node1.dat')
with open(filename, mode='r') as f:
row_0 = f.readline().split()
num_lines = int(row_0[0])
array = np.ndarray([num_lines, 6])
for i in range(num_lines):
row = f.readline()\
.replace('\t', ' ').replace('\n', ' ').split()
array[i, :] = row[0:6]
node1 = DataFrame(array[:, [1, 2, 3, 4]])
node1.columns = ['pore.x_coord', 'pore.y_coord', 'pore.z_coord',
'pore.coordination_number']
# Add node1 props to net
net['pore.coords'] = np.vstack((node1['pore.x_coord'],
node1['pore.y_coord'],
node1['pore.z_coord'])).T
# ---------------------------------------------------------------------
# Parse the node2 file
filename = Path(path.resolve(), prefix+'_node2.dat')
with open(filename, mode='r') as f:
node2 = read_table(filepath_or_buffer=f,
header=None,
sep=' ',
skipinitialspace=True,
index_col=0)
node2.columns = ['pore.volume', 'pore.radius', 'pore.shape_factor',
'pore.clay_volume']
# Add node2 props to net
net['pore.volume'] = np.array(node2['pore.volume'])
net['pore.radius'] = np.array(node2['pore.radius'])
net['pore.shape_factor'] = np.array(node2['pore.shape_factor'])
net['pore.clay_volume'] = np.array(node2['pore.clay_volume'])
net['throat.area'] = ((net['throat.radius']**2)
/ (4.0*net['throat.shape_factor']))
net['pore.area'] = ((net['pore.radius']**2)
/ (4.0*net['pore.shape_factor']))
if network is None:
network = GenericNetwork()
network = cls._update_network(network=network, net=net)
# Use OpenPNM Tools to clean up network
# Trim throats connected to 'inlet' or 'outlet' reservoirs
trim1 = np.where(np.any(net['throat.conns'] == -1, axis=1))[0]
# Apply 'outlet' label to these pores
outlets = network['throat.conns'][trim1, 1]
network['pore.outlets'] = False
network['pore.outlets'][outlets] = True
trim2 = np.where(np.any(net['throat.conns'] == -2, axis=1))[0]
# Apply 'inlet' label to these pores
inlets = network['throat.conns'][trim2, 1]
network['pore.inlets'] = False
network['pore.inlets'][inlets] = True
# Now trim the throats
to_trim = np.hstack([trim1, trim2])
        trim(network=network, throats=to_trim)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~device_update.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~device_update.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~device_update.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
*,
error: Optional["ErrorDetail"] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class ProxyResource(Resource):
"""The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~device_update.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class GroupInformation(ProxyResource):
"""The group information for creating a private endpoint on an Account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~device_update.models.SystemData
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link resource Private link DNS zone name.
:type required_zone_names: list[str]
:ivar provisioning_state: The provisioning state of private link group ID. Possible values
include: "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or ~device_update.models.GroupIdProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
required_zone_names: Optional[List[str]] = None,
**kwargs
):
super(GroupInformation, self).__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = required_zone_names
self.provisioning_state = None
class PrivateLinkResourceProperties(msrest.serialization.Model):
"""Properties of a private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link resource Private link DNS zone name.
:type required_zone_names: list[str]
"""
_validation = {
'group_id': {'readonly': True},
'required_members': {'readonly': True},
}
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'required_members': {'key': 'requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
*,
required_zone_names: Optional[List[str]] = None,
**kwargs
):
super(PrivateLinkResourceProperties, self).__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = required_zone_names
class GroupInformationProperties(PrivateLinkResourceProperties):
"""The properties for a group information object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link resource Private link DNS zone name.
:type required_zone_names: list[str]
:ivar provisioning_state: The provisioning state of private link group ID. Possible values
include: "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or ~device_update.models.GroupIdProvisioningState
"""
_validation = {
'group_id': {'readonly': True},
'required_members': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'required_members': {'key': 'requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'requiredZoneNames', 'type': '[str]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
required_zone_names: Optional[List[str]] = None,
**kwargs
):
super(GroupInformationProperties, self).__init__(required_zone_names=required_zone_names, **kwargs)
self.provisioning_state = None
class Instance(TrackedResource):
"""Device Update instance details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~device_update.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:ivar provisioning_state: Provisioning state. Possible values include: "Succeeded", "Deleted",
"Failed", "Canceled", "Accepted", "Creating".
:vartype provisioning_state: str or ~device_update.models.ProvisioningState
:ivar account_name: Parent Device Update Account name which Instance belongs to.
:vartype account_name: str
:param iot_hubs: List of IoT Hubs associated with the account.
:type iot_hubs: list[~device_update.models.IotHubSettings]
    :param enable_diagnostics: Enables or disables the collection of diagnostic logs.
:type enable_diagnostics: bool
:param diagnostic_storage_properties: Customer-initiated diagnostic log collection storage
properties.
:type diagnostic_storage_properties: ~device_update.models.DiagnosticStorageProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'account_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'account_name': {'key': 'properties.accountName', 'type': 'str'},
'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHubSettings]'},
'enable_diagnostics': {'key': 'properties.enableDiagnostics', 'type': 'bool'},
'diagnostic_storage_properties': {'key': 'properties.diagnosticStorageProperties', 'type': 'DiagnosticStorageProperties'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
iot_hubs: Optional[List["IotHubSettings"]] = None,
enable_diagnostics: Optional[bool] = None,
diagnostic_storage_properties: Optional["DiagnosticStorageProperties"] = None,
**kwargs
):
super(Instance, self).__init__(tags=tags, location=location, **kwargs)
self.provisioning_state = None
self.account_name = None
self.iot_hubs = iot_hubs
self.enable_diagnostics = enable_diagnostics
self.diagnostic_storage_properties = diagnostic_storage_properties
class InstanceList(msrest.serialization.Model):
"""List of Instances.
:param next_link: The link used to get the next page of Instances list.
:type next_link: str
:param value: List of Instances.
:type value: list[~device_update.models.Instance]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[Instance]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["Instance"]] = None,
**kwargs
):
super(InstanceList, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class IotHubSettings(msrest.serialization.Model):
"""Device Update account integration with IoT Hub settings.
All required parameters must be populated in order to send to Azure.
:param resource_id: Required. IoTHub resource ID.
:type resource_id: str
:param io_t_hub_connection_string: IoTHub connection string.
:type io_t_hub_connection_string: str
:param event_hub_connection_string: EventHub connection string.
:type event_hub_connection_string: str
"""
_validation = {
'resource_id': {'required': True, 'max_length': 244, 'min_length': 108},
}
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
'io_t_hub_connection_string': {'key': 'ioTHubConnectionString', 'type': 'str'},
'event_hub_connection_string': {'key': 'eventHubConnectionString', 'type': 'str'},
}
def __init__(
self,
*,
resource_id: str,
io_t_hub_connection_string: Optional[str] = None,
event_hub_connection_string: Optional[str] = None,
**kwargs
):
super(IotHubSettings, self).__init__(**kwargs)
self.resource_id = resource_id
self.io_t_hub_connection_string = io_t_hub_connection_string
        self.event_hub_connection_string = event_hub_connection_string
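# A minimal construction sketch (not part of the generated SDK); the resource
# ID and connection string are placeholders, and combining Instance with
# IotHubSettings here is purely illustrative:
#
#   hub = IotHubSettings(
#       resource_id='/subscriptions/.../resourceGroups/.../providers/Microsoft.Devices/IotHubs/example-hub',
#       io_t_hub_connection_string='HostName=example-hub.azure-devices.net;SharedAccessKeyName=...;SharedAccessKey=...')
#   instance = Instance(location='westus2', iot_hubs=[hub], enable_diagnostics=False)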
str, Enum)):
ID = "id"
BASE_UNIT_OF_MEASURE_ID = "baseUnitOfMeasureId"
BLOCKED = "blocked"
DISPLAY_NAME = "displayName"
GTIN = "gtin"
INVENTORY = "inventory"
ITEM_CATEGORY_CODE = "itemCategoryCode"
ITEM_CATEGORY_ID = "itemCategoryId"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NUMBER = "number"
PRICE_INCLUDES_TAX = "priceIncludesTax"
TAX_GROUP_CODE = "taxGroupCode"
TAX_GROUP_ID = "taxGroupId"
TYPE = "type"
UNIT_COST = "unitCost"
UNIT_PRICE = "unitPrice"
ITEM_CATEGORY = "itemCategory"
PICTURE = "picture"
class Enum288(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
ITEM_CATEGORY = "itemCategory"
PICTURE = "picture"
class Enum289(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CODE = "code"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum29(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
BLOCKED = "blocked"
CATEGORY = "category"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NUMBER = "number"
SUB_CATEGORY = "subCategory"
class Enum290(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CONTENT = "content"
CONTENT_DESC = "content desc"
CONTENT_TYPE = "contentType"
CONTENT_TYPE_DESC = "contentType desc"
HEIGHT = "height"
HEIGHT_DESC = "height desc"
WIDTH = "width"
WIDTH_DESC = "width desc"
class Enum291(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CONTENT = "content"
CONTENT_TYPE = "contentType"
HEIGHT = "height"
WIDTH = "width"
class Enum292(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CONTENT = "content"
CONTENT_TYPE = "contentType"
HEIGHT = "height"
WIDTH = "width"
class Enum293(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
ACCOUNT_ID = "accountId"
ACCOUNT_ID_DESC = "accountId desc"
AMOUNT_EXCLUDING_TAX = "amountExcludingTax"
AMOUNT_EXCLUDING_TAX_DESC = "amountExcludingTax desc"
AMOUNT_INCLUDING_TAX = "amountIncludingTax"
AMOUNT_INCLUDING_TAX_DESC = "amountIncludingTax desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
DISCOUNT_AMOUNT = "discountAmount"
DISCOUNT_AMOUNT_DESC = "discountAmount desc"
DISCOUNT_APPLIED_BEFORE_TAX = "discountAppliedBeforeTax"
DISCOUNT_APPLIED_BEFORE_TAX_DESC = "discountAppliedBeforeTax desc"
DISCOUNT_PERCENT = "discountPercent"
DISCOUNT_PERCENT_DESC = "discountPercent desc"
DOCUMENT_ID = "documentId"
DOCUMENT_ID_DESC = "documentId desc"
ITEM_ID = "itemId"
ITEM_ID_DESC = "itemId desc"
LINE_TYPE = "lineType"
LINE_TYPE_DESC = "lineType desc"
NET_AMOUNT = "netAmount"
NET_AMOUNT_DESC = "netAmount desc"
NET_AMOUNT_INCLUDING_TAX = "netAmountIncludingTax"
NET_AMOUNT_INCLUDING_TAX_DESC = "netAmountIncludingTax desc"
NET_TAX_AMOUNT = "netTaxAmount"
NET_TAX_AMOUNT_DESC = "netTaxAmount desc"
QUANTITY = "quantity"
QUANTITY_DESC = "quantity desc"
SEQUENCE = "sequence"
SEQUENCE_DESC = "sequence desc"
TAX_CODE = "taxCode"
TAX_CODE_DESC = "taxCode desc"
TAX_PERCENT = "taxPercent"
TAX_PERCENT_DESC = "taxPercent desc"
TOTAL_TAX_AMOUNT = "totalTaxAmount"
TOTAL_TAX_AMOUNT_DESC = "totalTaxAmount desc"
UNIT_OF_MEASURE_ID = "unitOfMeasureId"
UNIT_OF_MEASURE_ID_DESC = "unitOfMeasureId desc"
UNIT_PRICE = "unitPrice"
UNIT_PRICE_DESC = "unitPrice desc"
class Enum294(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ACCOUNT_ID = "accountId"
AMOUNT_EXCLUDING_TAX = "amountExcludingTax"
AMOUNT_INCLUDING_TAX = "amountIncludingTax"
DESCRIPTION = "description"
DISCOUNT_AMOUNT = "discountAmount"
DISCOUNT_APPLIED_BEFORE_TAX = "discountAppliedBeforeTax"
DISCOUNT_PERCENT = "discountPercent"
DOCUMENT_ID = "documentId"
ITEM_ID = "itemId"
LINE_TYPE = "lineType"
NET_AMOUNT = "netAmount"
NET_AMOUNT_INCLUDING_TAX = "netAmountIncludingTax"
NET_TAX_AMOUNT = "netTaxAmount"
QUANTITY = "quantity"
SEQUENCE = "sequence"
TAX_CODE = "taxCode"
TAX_PERCENT = "taxPercent"
TOTAL_TAX_AMOUNT = "totalTaxAmount"
UNIT_OF_MEASURE_ID = "unitOfMeasureId"
UNIT_PRICE = "unitPrice"
ACCOUNT = "account"
ITEM = "item"
class Enum295(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
ACCOUNT = "account"
ITEM = "item"
class Enum296(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ACCOUNT_ID = "accountId"
AMOUNT_EXCLUDING_TAX = "amountExcludingTax"
AMOUNT_INCLUDING_TAX = "amountIncludingTax"
DESCRIPTION = "description"
DISCOUNT_AMOUNT = "discountAmount"
DISCOUNT_APPLIED_BEFORE_TAX = "discountAppliedBeforeTax"
DISCOUNT_PERCENT = "discountPercent"
DOCUMENT_ID = "documentId"
ITEM_ID = "itemId"
LINE_TYPE = "lineType"
NET_AMOUNT = "netAmount"
NET_AMOUNT_INCLUDING_TAX = "netAmountIncludingTax"
NET_TAX_AMOUNT = "netTaxAmount"
QUANTITY = "quantity"
SEQUENCE = "sequence"
TAX_CODE = "taxCode"
TAX_PERCENT = "taxPercent"
TOTAL_TAX_AMOUNT = "totalTaxAmount"
UNIT_OF_MEASURE_ID = "unitOfMeasureId"
UNIT_PRICE = "unitPrice"
ACCOUNT = "account"
ITEM = "item"
class Enum297(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
ACCOUNT = "account"
ITEM = "item"
class Enum298(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
BLOCKED = "blocked"
CATEGORY = "category"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NUMBER = "number"
SUB_CATEGORY = "subCategory"
class Enum299(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
BASE_UNIT_OF_MEASURE_ID = "baseUnitOfMeasureId"
BLOCKED = "blocked"
DISPLAY_NAME = "displayName"
GTIN = "gtin"
INVENTORY = "inventory"
ITEM_CATEGORY_CODE = "itemCategoryCode"
ITEM_CATEGORY_ID = "itemCategoryId"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NUMBER = "number"
PRICE_INCLUDES_TAX = "priceIncludesTax"
TAX_GROUP_CODE = "taxGroupCode"
TAX_GROUP_ID = "taxGroupId"
TYPE = "type"
UNIT_COST = "unitCost"
UNIT_PRICE = "unitPrice"
ITEM_CATEGORY = "itemCategory"
PICTURE = "picture"
class Enum30(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
AMOUNT = "amount"
AMOUNT_DESC = "amount desc"
APPLIES_TO_INVOICE_ID = "appliesToInvoiceId"
APPLIES_TO_INVOICE_ID_DESC = "appliesToInvoiceId desc"
APPLIES_TO_INVOICE_NUMBER = "appliesToInvoiceNumber"
APPLIES_TO_INVOICE_NUMBER_DESC = "appliesToInvoiceNumber desc"
COMMENT = "comment"
COMMENT_DESC = "comment desc"
CONTACT_ID = "contactId"
CONTACT_ID_DESC = "contactId desc"
CUSTOMER_ID = "customerId"
CUSTOMER_ID_DESC = "customerId desc"
CUSTOMER_NUMBER = "customerNumber"
CUSTOMER_NUMBER_DESC = "customerNumber desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
DOCUMENT_NUMBER = "documentNumber"
DOCUMENT_NUMBER_DESC = "documentNumber desc"
EXTERNAL_DOCUMENT_NUMBER = "externalDocumentNumber"
EXTERNAL_DOCUMENT_NUMBER_DESC = "externalDocumentNumber desc"
JOURNAL_DISPLAY_NAME = "journalDisplayName"
JOURNAL_DISPLAY_NAME_DESC = "journalDisplayName desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
LINE_NUMBER = "lineNumber"
LINE_NUMBER_DESC = "lineNumber desc"
POSTING_DATE = "postingDate"
POSTING_DATE_DESC = "postingDate desc"
class Enum300(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
ITEM_CATEGORY = "itemCategory"
PICTURE = "picture"
class Enum301(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CODE = "code"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum302(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CONTENT = "content"
CONTENT_DESC = "content desc"
CONTENT_TYPE = "contentType"
CONTENT_TYPE_DESC = "contentType desc"
HEIGHT = "height"
HEIGHT_DESC = "height desc"
WIDTH = "width"
WIDTH_DESC = "width desc"
class Enum303(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CONTENT = "content"
CONTENT_TYPE = "contentType"
HEIGHT = "height"
WIDTH = "width"
class Enum304(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CONTENT = "content"
CONTENT_TYPE = "contentType"
HEIGHT = "height"
WIDTH = "width"
class Enum305(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
ACCEPTED_DATE = "acceptedDate"
ACCEPTED_DATE_DESC = "acceptedDate desc"
BILLING_POSTAL_ADDRESS = "billingPostalAddress"
BILLING_POSTAL_ADDRESS_DESC = "billingPostalAddress desc"
BILL_TO_CUSTOMER_ID = "billToCustomerId"
BILL_TO_CUSTOMER_ID_DESC = "billToCustomerId desc"
BILL_TO_CUSTOMER_NUMBER = "billToCustomerNumber"
BILL_TO_CUSTOMER_NUMBER_DESC = "billToCustomerNumber desc"
BILL_TO_NAME = "billToName"
BILL_TO_NAME_DESC = "billToName desc"
CURRENCY_CODE = "currencyCode"
CURRENCY_CODE_DESC = "currencyCode desc"
CURRENCY_ID = "currencyId"
CURRENCY_ID_DESC = "currencyId desc"
CUSTOMER_ID = "customerId"
CUSTOMER_ID_DESC = "customerId desc"
CUSTOMER_NAME = "customerName"
CUSTOMER_NAME_DESC = "customerName desc"
CUSTOMER_NUMBER = "customerNumber"
CUSTOMER_NUMBER_DESC = "customerNumber desc"
DISCOUNT_AMOUNT = "discountAmount"
DISCOUNT_AMOUNT_DESC = "discountAmount desc"
DOCUMENT_DATE = "documentDate"
DOCUMENT_DATE_DESC = "documentDate desc"
DUE_DATE = "dueDate"
DUE_DATE_DESC = "dueDate desc"
EMAIL = "email"
EMAIL_DESC = "email desc"
EXTERNAL_DOCUMENT_NUMBER = "externalDocumentNumber"
EXTERNAL_DOCUMENT_NUMBER_DESC = "externalDocumentNumber desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NUMBER = "number"
NUMBER_DESC = "number desc"
PAYMENT_TERMS_ID = "paymentTermsId"
PAYMENT_TERMS_ID_DESC = "paymentTermsId desc"
PHONE_NUMBER = "phoneNumber"
PHONE_NUMBER_DESC = "phoneNumber desc"
SALESPERSON = "salesperson"
SALESPERSON_DESC = "salesperson desc"
SELLING_POSTAL_ADDRESS = "sellingPostalAddress"
SELLING_POSTAL_ADDRESS_DESC = "sellingPostalAddress desc"
SENT_DATE = "sentDate"
SENT_DATE_DESC = "sentDate desc"
SHIPMENT_METHOD_ID = "shipmentMethodId"
SHIPMENT_METHOD_ID_DESC = "shipmentMethodId desc"
SHIPPING_POSTAL_ADDRESS = "shippingPostalAddress"
SHIPPING_POSTAL_ADDRESS_DESC = "shippingPostalAddress desc"
SHIP_TO_CONTACT = "shipToContact"
SHIP_TO_CONTACT_DESC = "shipToContact desc"
SHIP_TO_NAME = "shipToName"
SHIP_TO_NAME_DESC = "shipToName desc"
STATUS = "status"
STATUS_DESC = "status desc"
TOTAL_AMOUNT_EXCLUDING_TAX = "totalAmountExcludingTax"
TOTAL_AMOUNT_EXCLUDING_TAX_DESC = "totalAmountExcludingTax desc"
TOTAL_AMOUNT_INCLUDING_TAX = "totalAmountIncludingTax"
TOTAL_AMOUNT_INCLUDING_TAX_DESC = "totalAmountIncludingTax desc"
TOTAL_TAX_AMOUNT = "totalTaxAmount"
TOTAL_TAX_AMOUNT_DESC = "totalTaxAmount desc"
VALID_UNTIL_DATE = "validUntilDate"
VALID_UNTIL_DATE_DESC = "validUntilDate desc"
class Enum306(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ACCEPTED_DATE = "acceptedDate"
BILLING_POSTAL_ADDRESS = "billingPostalAddress"
BILL_TO_CUSTOMER_ID = "billToCustomerId"
BILL_TO_CUSTOMER_NUMBER = "billToCustomerNumber"
BILL_TO_NAME = "billToName"
CURRENCY_CODE = "currencyCode"
CURRENCY_ID = "currencyId"
CUSTOMER_ID = "customerId"
CUSTOMER_NAME = "customerName"
CUSTOMER_NUMBER = "customerNumber"
DISCOUNT_AMOUNT = "discountAmount"
DOCUMENT_DATE = "documentDate"
DUE_DATE = "dueDate"
EMAIL = "email"
EXTERNAL_DOCUMENT_NUMBER = "externalDocumentNumber"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NUMBER = "number"
PAYMENT_TERMS_ID = "paymentTermsId"
PHONE_NUMBER = "phoneNumber"
SALESPERSON = "salesperson"
SELLING_POSTAL_ADDRESS = "sellingPostalAddress"
SENT_DATE = "sentDate"
SHIPMENT_METHOD_ID = "shipmentMethodId"
SHIPPING_POSTAL_ADDRESS = "shippingPostalAddress"
SHIP_TO_CONTACT = "shipToContact"
SHIP_TO_NAME = "shipToName"
STATUS = "status"
TOTAL_AMOUNT_EXCLUDING_TAX = "totalAmountExcludingTax"
TOTAL_AMOUNT_INCLUDING_TAX = "totalAmountIncludingTax"
TOTAL_TAX_AMOUNT = "totalTaxAmount"
VALID_UNTIL_DATE = "validUntilDate"
CURRENCY = "currency"
CUSTOMER = "customer"
PAYMENT_TERM = "paymentTerm"
SALES_QUOTE_LINES = "salesQuoteLines"
SHIPMENT_METHOD = "shipmentMethod"
class Enum307(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CURRENCY = "currency"
CUSTOMER = "customer"
PAYMENT_TERM = "paymentTerm"
SALES_QUOTE_LINES = "salesQuoteLines"
SHIPMENT_METHOD = "shipmentMethod"
class Enum308(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ACCEPTED_DATE = "acceptedDate"
BILLING_POSTAL_ADDRESS = "billingPostalAddress"
BILL_TO_CUSTOMER_ID = "billToCustomerId"
BILL_TO_CUSTOMER_NUMBER = "billToCustomerNumber"
BILL_TO_NAME = "billToName"
CURRENCY_CODE = "currencyCode"
CURRENCY_ID = "currencyId"
CUSTOMER_ID = "customerId"
CUSTOMER_NAME = "customerName"
CUSTOMER_NUMBER = "customerNumber"
DISCOUNT_AMOUNT = "discountAmount"
DOCUMENT_DATE = "documentDate"
DUE_DATE = "dueDate"
EMAIL = "email"
EXTERNAL_DOCUMENT_NUMBER = "externalDocumentNumber"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NUMBER = "number"
PAYMENT_TERMS_ID = "paymentTermsId"
PHONE_NUMBER = "phoneNumber"
SALESPERSON = "salesperson"
SELLING_POSTAL_ADDRESS = "sellingPostalAddress"
SENT_DATE = "sentDate"
SHIPMENT_METHOD_ID = "shipmentMethodId"
SHIPPING_POSTAL_ADDRESS = "shippingPostalAddress"
SHIP_TO_CONTACT = "shipToContact"
SHIP_TO_NAME = "shipToName"
STATUS = "status"
TOTAL_AMOUNT_EXCLUDING_TAX = "totalAmountExcludingTax"
TOTAL_AMOUNT_INCLUDING_TAX = "totalAmountIncludingTax"
TOTAL_TAX_AMOUNT = "totalTaxAmount"
VALID_UNTIL_DATE = "validUntilDate"
CURRENCY = "currency"
CUSTOMER = "customer"
PAYMENT_TERM = "paymentTerm"
SALES_QUOTE_LINES = "salesQuoteLines"
SHIPMENT_METHOD = "shipmentMethod"
class Enum309(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CURRENCY = "currency"
CUSTOMER = "customer"
PAYMENT_TERM = "paymentTerm"
    SALES_QUOTE_LINES = "salesQuoteLines"
    SHIPMENT_METHOD = "shipmentMethod"
# Source file: services/autoscaling/src/oci_cli_auto_scaling/generated/autoscaling_cli.py (repo: honzajavorek/oci-cli)
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
from __future__ import print_function
import click
import oci # noqa: F401
import six # noqa: F401
import sys # noqa: F401
from oci_cli.cli_root import cli
from oci_cli import cli_constants # noqa: F401
from oci_cli import cli_util
from oci_cli import json_skeleton_utils
from oci_cli import custom_types # noqa: F401
from oci_cli.aliasing import CommandGroupWithAlias
@cli.command(cli_util.override('autoscaling_root_group.command_name', 'autoscaling'), cls=CommandGroupWithAlias, help=cli_util.override('autoscaling_root_group.help', """APIs for dynamically scaling Compute resources to meet application requirements.
For information about the Compute service, see [Overview of the Compute Service](/Content/Compute/Concepts/computeoverview.htm).
"""), short_help=cli_util.override('autoscaling_root_group.short_help', """Autoscaling API"""))
@cli_util.help_option_group
def autoscaling_root_group():
pass
@click.command(cli_util.override('auto_scaling_configuration_group.command_name', 'auto-scaling-configuration'), cls=CommandGroupWithAlias, help="""An autoscaling configuration allows you to dynamically scale the resources in a Compute instance pool. For more information, see [Autoscaling].""")
@cli_util.help_option_group
def auto_scaling_configuration_group():
pass
@click.command(cli_util.override('auto_scaling_policy_group.command_name', 'auto-scaling-policy'), cls=CommandGroupWithAlias, help="""Autoscaling policies define the criteria that trigger autoscaling actions and the actions to take.
An autoscaling policy is part of an autoscaling configuration. For more information, see [Autoscaling].""")
@cli_util.help_option_group
def auto_scaling_policy_group():
pass
autoscaling_root_group.add_command(auto_scaling_configuration_group)
autoscaling_root_group.add_command(auto_scaling_policy_group)
@auto_scaling_configuration_group.command(name=cli_util.override('change_auto_scaling_configuration_compartment.command_name', 'change-compartment'), help=u"""Moves an autoscaling configuration into a different compartment within the same tenancy. For information about moving resources between compartments, see [Moving Resources to a Different Compartment].
When you move an autoscaling configuration to a different compartment, associated resources such as instance pools are not moved.""")
@cli_util.option('--auto-scaling-configuration-id', required=True, help=u"""The [OCID] of the autoscaling configuration.""")
@cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment to move the autoscaling configuration to.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def change_auto_scaling_configuration_compartment(ctx, from_json, auto_scaling_configuration_id, compartment_id, if_match):
if isinstance(auto_scaling_configuration_id, six.string_types) and len(auto_scaling_configuration_id.strip()) == 0:
raise click.UsageError('Parameter --auto-scaling-configuration-id cannot be whitespace or empty string')
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
details = {}
details['compartmentId'] = compartment_id
client = cli_util.build_client('auto_scaling', ctx)
result = client.change_auto_scaling_configuration_compartment(
auto_scaling_configuration_id=auto_scaling_configuration_id,
change_compartment_details=details,
**kwargs
)
cli_util.render_response(result, ctx)
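# A hypothetical invocation of the change-compartment command defined above;
# both OCIDs are placeholders, not real resources:
#
#   oci autoscaling auto-scaling-configuration change-compartment \
#       --auto-scaling-configuration-id ocid1.autoscalingconfiguration.oc1..exampleuniqueID \
#       --compartment-id ocid1.compartment.oc1..exampleuniqueID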
@auto_scaling_configuration_group.command(name=cli_util.override('create_auto_scaling_configuration.command_name', 'create'), help=u"""Creates an autoscaling configuration.""")
@cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment containing the autoscaling configuration.""")
@cli_util.option('--policies', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--resource', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags].
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--display-name', help=u"""A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags].
Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--cool-down-in-seconds', type=click.INT, help=u"""The minimum period of time to wait between scaling actions. The cooldown period gives the system time to stabilize before rescaling. The minimum value is 300 seconds, which is also the default.""")
@cli_util.option('--is-enabled', type=click.BOOL, help=u"""Whether the autoscaling configuration is enabled.""")
@json_skeleton_utils.get_cli_json_input_option({'defined-tags': {'module': 'autoscaling', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'autoscaling', 'class': 'dict(str, string)'}, 'policies': {'module': 'autoscaling', 'class': 'list[CreateAutoScalingPolicyDetails]'}, 'resource': {'module': 'autoscaling', 'class': 'Resource'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'defined-tags': {'module': 'autoscaling', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'autoscaling', 'class': 'dict(str, string)'}, 'policies': {'module': 'autoscaling', 'class': 'list[CreateAutoScalingPolicyDetails]'}, 'resource': {'module': 'autoscaling', 'class': 'Resource'}}, output_type={'module': 'autoscaling', 'class': 'AutoScalingConfiguration'})
@cli_util.wrap_exceptions
def create_auto_scaling_configuration(ctx, from_json, compartment_id, policies, resource, defined_tags, display_name, freeform_tags, cool_down_in_seconds, is_enabled):
kwargs = {}
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
details = {}
details['compartmentId'] = compartment_id
details['policies'] = cli_util.parse_json_parameter("policies", policies)
details['resource'] = cli_util.parse_json_parameter("resource", resource)
if defined_tags is not None:
details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)
if display_name is not None:
details['displayName'] = display_name
if freeform_tags is not None:
details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)
if cool_down_in_seconds is not None:
details['coolDownInSeconds'] = cool_down_in_seconds
if is_enabled is not None:
details['isEnabled'] = is_enabled
client = cli_util.build_client('auto_scaling', ctx)
result = client.create_auto_scaling_configuration(
create_auto_scaling_configuration_details=details,
**kwargs
)
cli_util.render_response(result, ctx)
@auto_scaling_configuration_group.command(name=cli_util.override('create_auto_scaling_configuration_instance_pool_resource.command_name', 'create-auto-scaling-configuration-instance-pool-resource'), help=u"""Creates an autoscaling configuration.""")
@cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment containing the autoscaling configuration.""")
@cli_util.option('--policies', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--resource-id', required=True, help=u"""The [OCID] of the resource that is managed by the autoscaling configuration.""")
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags].
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--display-name', help=u"""A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags].
Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--cool-down-in-seconds', type=click.INT, help=u"""The minimum period of time to wait between scaling actions. The cooldown period gives the system time to stabilize before rescaling. The minimum value is 300 seconds, which is also the default.""")
@cli_util.option('--is-enabled', type=click.BOOL, help=u"""Whether the autoscaling configuration is enabled.""")
@json_skeleton_utils.get_cli_json_input_option({'defined-tags': {'module': 'autoscaling', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'autoscaling', 'class': 'dict(str, string)'}, 'policies': {'module': 'autoscaling', 'class': 'list[CreateAutoScalingPolicyDetails]'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'defined-tags': {'module': 'autoscaling', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'autoscaling', 'class': 'dict(str, string)'}, 'policies': {'module': 'autoscaling', 'class': 'list[CreateAutoScalingPolicyDetails]'}}, output_type={'module': 'autoscaling', 'class': 'AutoScalingConfiguration'})
@cli_util.wrap_exceptions
def create_auto_scaling_configuration_instance_pool_resource(ctx, from_json, compartment_id, policies, resource_id, defined_tags, display_name, freeform_tags, cool_down_in_seconds, is_enabled):
kwargs = {}
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
details = {}
details['resource'] = {}
details['compartmentId'] = compartment_id
details['policies'] = cli_util.parse_json_parameter("policies", policies)
details['resource']['id'] = resource_id
if defined_tags is not None:
details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)
if display_name is not None:
details['displayName'] = display_name
if freeform_tags is not None:
details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)
if cool_down_in_seconds is not None:
details['coolDownInSeconds'] = cool_down_in_seconds
if is_enabled is not None:
details['isEnabled'] = is_enabled
details['resource']['type'] = 'instancePool'
client = cli_util.build_client('auto_scaling', ctx)
result = client.create_auto_scaling_configuration(
create_auto_scaling_configuration_details=details,
**kwargs
)
cli_util.render_response(result, ctx)
@auto_scaling_policy_group.command(name=cli_util.override('create_auto_scaling_policy.command_name', 'create'), help=u"""Creates an autoscaling policy for the specified autoscaling configuration.""")
@cli_util.option('--auto-scaling-configuration-id', required=True, help=u"""The [OCID] of the autoscaling configuration.""")
@cli_util.option('--capacity', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""The capacity requirements of the autoscaling policy.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--policy-type', required=True, help=u"""The type of autoscaling policy.""")
@cli_util.option('--display-name', help=u"""A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.""")
@json_skeleton_utils.get_cli_json_input_option({'capacity': {'module': 'autoscaling', 'class': 'Capacity'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'capacity': {'module': 'autoscaling', 'class': 'Capacity'}}, output_type={'module': 'autoscaling', 'class': 'AutoScalingPolicy'})
@cli_util.wrap_exceptions
def create_auto_scaling_policy(ctx, from_json, auto_scaling_configuration_id, capacity, policy_type, display_name):
if isinstance(auto_scaling_configuration_id, six.string_types) and len(auto_scaling_configuration_id.strip()) == 0:
raise click.UsageError('Parameter --auto-scaling-configuration-id cannot be whitespace or empty string')
kwargs = {}
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
details = {}
details['capacity'] = cli_util.parse_json_parameter("capacity", capacity)
details['policyType'] = policy_type
if display_name is not None:
details['displayName'] = display_name
client = cli_util.build_client('auto_scaling', ctx)
result = client.create_auto_scaling_policy(
auto_scaling_configuration_id=auto_scaling_configuration_id,
create_auto_scaling_policy_details=details,
**kwargs
)
cli_util.render_response(result, ctx)
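# A hypothetical invocation of the generic policy-create command above; the
# OCID is a placeholder, the JSON shape of --capacity (max/min/initial keys)
# is an assumption about the Capacity model rather than something taken from
# this file, and the service may require extra fields (such as rules)
# depending on the policy type:
#
#   oci autoscaling auto-scaling-policy create \
#       --auto-scaling-configuration-id ocid1.autoscalingconfiguration.oc1..exampleuniqueID \
#       --policy-type threshold \
#       --capacity '{"max": 4, "min": 1, "initial": 2}' \
#       --display-name example-policy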
@auto_scaling_policy_group.command(name=cli_util.override('create_auto_scaling_policy_create_threshold_policy_details.command_name', 'create-auto-scaling-policy-create-threshold-policy-details'), help=u"""Creates an autoscaling policy for the specified autoscaling configuration.""")
@cli_util.option('--auto-scaling-configuration-id', required=True, help=u"""The [OCID] of the autoscaling configuration.""")
@cli_util.option('--capacity', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""The capacity requirements of the autoscaling policy.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--rules', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--display-name', help=u"""A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.""")
@json_skeleton_utils.get_cli_json_input_option({'capacity': {'module': 'autoscaling', 'class': 'Capacity'}, 'rules': {'module': 'autoscaling', 'class': 'list[CreateConditionDetails]'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'capacity': {'module': 'autoscaling', 'class': 'Capacity'}, 'rules': {'module': 'autoscaling', 'class': 'list[CreateConditionDetails]'}}, output_type={'module': 'autoscaling', 'class': 'AutoScalingPolicy'})
@cli_util.wrap_exceptions
def create_auto_scaling_policy_create_threshold_policy_details(ctx, from_json, auto_scaling_configuration_id, capacity, rules, display_name):
if isinstance(auto_scaling_configuration_id, six.string_types) and len(auto_scaling_configuration_id.strip()) == 0:
raise click.UsageError('Parameter --auto-scaling-configuration-id cannot be whitespace or empty string')
kwargs = {}
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
details = {}
details['capacity'] = cli_util.parse_json_parameter("capacity", capacity)
details['rules'] = cli_util.parse_json_parameter("rules", rules)
if display_name is not None:
details['displayName'] = display_name
details['policyType'] = 'threshold'
client = cli_util.build_client('auto_scaling', ctx)
result = client.create_auto_scaling_policy(
auto_scaling_configuration_id=auto_scaling_configuration_id,
create_auto_scaling_policy_details=details,
**kwargs
)
cli_util.render_response(result, ctx)
@auto_scaling_configuration_group.command(name=cli_util.override('delete_auto_scaling_configuration.command_name', 'delete'), help=u"""Deletes an autoscaling configuration.""")
@cli_util.option('--auto-scaling-configuration-id', required=True, help=u"""The [OCID] of the autoscaling configuration.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.confirm_delete_option
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def delete_auto_scaling_configuration(ctx, from_json, auto_scaling_configuration_id, if_match):
if isinstance(auto_scaling_configuration_id, six.string_types) and len(auto_scaling_configuration_id.strip()) == 0:
raise click.UsageError('Parameter --auto-scaling-configuration-id cannot be whitespace or empty string')
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('auto_scaling', ctx)
result = client.delete_auto_scaling_configuration(
auto_scaling_configuration_id=auto_scaling_configuration_id,
**kwargs
)
cli_util.render_response(result, ctx)
@auto_scaling_policy_group.command(name=cli_util.override('delete_auto_scaling_policy.command_name', 'delete'), help=u"""Deletes an autoscaling policy for the specified autoscaling configuration.""")
@cli_util.option('--auto-scaling-configuration-id', required=True, help=u"""The [OCID] of the autoscaling configuration.""")
@cli_util.option('--auto-scaling-policy-id', required=True, help=u"""The ID of the autoscaling policy.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxtpro.base.exchange import Exchange
import ccxt.async_support as ccxt
from ccxtpro.base.cache import ArrayCache, ArrayCacheBySymbolById, ArrayCacheByTimestamp
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
from ccxt.base.errors import ExchangeError
from ccxt.base.precise import Precise
class binance(Exchange, ccxt.binance):
def describe(self):
return self.deep_extend(super(binance, self).describe(), {
'has': {
'ws': True,
'watchBalance': True,
'watchMyTrades': True,
'watchOHLCV': True,
'watchOrderBook': True,
'watchOrders': True,
'watchTicker': True,
'watchTickers': False, # for now
'watchTrades': True,
},
'urls': {
'test': {
'ws': {
'spot': 'wss://testnet.binance.vision/ws',
'margin': 'wss://testnet.binance.vision/ws',
'future': 'wss://stream.binancefuture.com/ws',
'delivery': 'wss://dstream.binancefuture.com/ws',
},
},
'api': {
'ws': {
'spot': 'wss://stream.binance.com:9443/ws',
'margin': 'wss://stream.binance.com:9443/ws',
'future': 'wss://fstream.binance.com/ws',
'delivery': 'wss://dstream.binance.com/ws',
},
},
},
'options': {
# get updates every 1000ms or 100ms
# or every 0ms in real-time for futures
'watchOrderBookRate': 100,
'tradesLimit': 1000,
'ordersLimit': 1000,
'OHLCVLimit': 1000,
'requestId': {},
'watchOrderBookLimit': 1000, # default limit
'watchTrades': {
'name': 'trade', # 'trade' or 'aggTrade'
},
'watchTicker': {
'name': 'ticker', # ticker = 1000ms L1+OHLCV, bookTicker = real-time L1
},
'watchBalance': {
'fetchBalanceSnapshot': False, # or True
'awaitBalanceSnapshot': True, # whether to wait for the balance snapshot before providing updates
},
'wallet': 'wb', # wb = wallet balance, cw = cross balance
'listenKeyRefreshRate': 1200000, # 20 mins
},
})
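    # A minimal usage sketch for this exchange class (driver code only, not
    # part of the class itself); it assumes the usual ccxt.pro entry point and
    # an asyncio event loop:
    #
    #   import asyncio
    #   import ccxtpro
    #
    #   async def main():
    #       exchange = ccxtpro.binance()
    #       orderbook = await exchange.watch_order_book('BTC/USDT', 100)
    #       print(orderbook['bids'][0], orderbook['asks'][0])
    #       await exchange.close()
    #
    #   asyncio.run(main())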
def request_id(self, url):
options = self.safe_value(self.options, 'requestId', {})
previousValue = self.safe_integer(options, url, 0)
newValue = self.sum(previousValue, 1)
self.options['requestId'][url] = newValue
return newValue
async def watch_order_book(self, symbol, limit=None, params={}):
#
# todo add support for <levels>-snapshots(depth)
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#partial-book-depth-streams # <symbol>@depth<levels>@100ms or <symbol>@depth<levels>(1000ms)
# valid <levels> are 5, 10, or 20
#
# default 100, max 1000, valid limits 5, 10, 20, 50, 100, 500, 1000
if limit is not None:
if (limit != 5) and (limit != 10) and (limit != 20) and (limit != 50) and (limit != 100) and (limit != 500) and (limit != 1000):
raise ExchangeError(self.id + ' watchOrderBook limit argument must be None, 5, 10, 20, 50, 100, 500 or 1000')
#
await self.load_markets()
defaultType = self.safe_string_2(self.options, 'watchOrderBook', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
market = self.market(symbol)
#
# notice the differences between trading futures and spot trading
# the algorithms use different urls in step 1
# delta caching and merging also differs in steps 4, 5, 6
#
# spot/margin
# https://binance-docs.github.io/apidocs/spot/en/#how-to-manage-a-local-order-book-correctly
#
# 1. Open a stream to wss://stream.binance.com:9443/ws/bnbbtc@depth.
# 2. Buffer the events you receive from the stream.
# 3. Get a depth snapshot from https://www.binance.com/api/v1/depth?symbol=BNBBTC&limit=1000 .
# 4. Drop any event where u is <= lastUpdateId in the snapshot.
# 5. The first processed event should have U <= lastUpdateId+1 AND u >= lastUpdateId+1.
# 6. While listening to the stream, each new event's U should be equal to the previous event's u+1.
# 7. The data in each event is the absolute quantity for a price level.
# 8. If the quantity is 0, remove the price level.
# 9. Receiving an event that removes a price level that is not in your local order book can happen and is normal.
#
# futures
# https://binance-docs.github.io/apidocs/futures/en/#how-to-manage-a-local-order-book-correctly
#
# 1. Open a stream to wss://fstream.binance.com/stream?streams=btcusdt@depth.
# 2. Buffer the events you receive from the stream. For same price, latest received update covers the previous one.
# 3. Get a depth snapshot from https://fapi.binance.com/fapi/v1/depth?symbol=BTCUSDT&limit=1000 .
# 4. Drop any event where u is < lastUpdateId in the snapshot.
# 5. The first processed event should have U <= lastUpdateId AND u >= lastUpdateId
# 6. While listening to the stream, each new event's pu should be equal to the previous event's u, otherwise initialize the process from step 3.
# 7. The data in each event is the absolute quantity for a price level.
# 8. If the quantity is 0, remove the price level.
# 9. Receiving an event that removes a price level that is not in your local order book can happen and is normal.
#
name = 'depth'
messageHash = market['lowercaseId'] + '@' + name
url = self.urls['api']['ws'][type] # + '/' + messageHash
requestId = self.request_id(url)
watchOrderBookRate = self.safe_string(self.options, 'watchOrderBookRate', '100')
request = {
'method': 'SUBSCRIBE',
'params': [
messageHash + '@' + watchOrderBookRate + 'ms',
],
'id': requestId,
}
subscription = {
'id': str(requestId),
'messageHash': messageHash,
'name': name,
'symbol': symbol,
'method': self.handle_order_book_subscription,
'limit': limit,
'type': type,
'params': params,
}
message = self.extend(request, query)
# 1. Open a stream to wss://stream.binance.com:9443/ws/bnbbtc@depth.
orderbook = await self.watch(url, messageHash, message, messageHash, subscription)
return orderbook.limit(limit)
async def fetch_order_book_snapshot(self, client, message, subscription):
defaultLimit = self.safe_integer(self.options, 'watchOrderBookLimit', 1000)
type = self.safe_value(subscription, 'type')
symbol = self.safe_string(subscription, 'symbol')
messageHash = self.safe_string(subscription, 'messageHash')
limit = self.safe_integer(subscription, 'limit', defaultLimit)
params = self.safe_value(subscription, 'params')
# 3. Get a depth snapshot from https://www.binance.com/api/v1/depth?symbol=BNBBTC&limit=1000 .
# todo: self is a synch blocking call in ccxt.php - make it async
# default 100, max 1000, valid limits 5, 10, 20, 50, 100, 500, 1000
snapshot = await self.fetch_order_book(symbol, limit, params)
orderbook = self.safe_value(self.orderbooks, symbol)
if orderbook is None:
# if the orderbook is dropped before the snapshot is received
return
orderbook.reset(snapshot)
# unroll the accumulated deltas
messages = orderbook.cache
for i in range(0, len(messages)):
message = messages[i]
U = self.safe_integer(message, 'U')
u = self.safe_integer(message, 'u')
pu = self.safe_integer(message, 'pu')
if type == 'future':
# 4. Drop any event where u is < lastUpdateId in the snapshot
if u < orderbook['nonce']:
continue
# 5. The first processed event should have U <= lastUpdateId AND u >= lastUpdateId
if (U <= orderbook['nonce']) and (u >= orderbook['nonce']) or (pu == orderbook['nonce']):
self.handle_order_book_message(client, message, orderbook)
else:
# 4. Drop any event where u is <= lastUpdateId in the snapshot
if u <= orderbook['nonce']:
continue
# 5. The first processed event should have U <= lastUpdateId+1 AND u >= lastUpdateId+1
if ((U - 1) <= orderbook['nonce']) and ((u - 1) >= orderbook['nonce']):
self.handle_order_book_message(client, message, orderbook)
self.orderbooks[symbol] = orderbook
client.resolve(orderbook, messageHash)
def handle_delta(self, bookside, delta):
price = self.safe_float(delta, 0)
amount = self.safe_float(delta, 1)
bookside.store(price, amount)
def handle_deltas(self, bookside, deltas):
for i in range(0, len(deltas)):
self.handle_delta(bookside, deltas[i])
def handle_order_book_message(self, client, message, orderbook):
u = self.safe_integer(message, 'u')
self.handle_deltas(orderbook['asks'], self.safe_value(message, 'a', []))
self.handle_deltas(orderbook['bids'], self.safe_value(message, 'b', []))
orderbook['nonce'] = u
timestamp = self.safe_integer(message, 'E')
orderbook['timestamp'] = timestamp
orderbook['datetime'] = self.iso8601(timestamp)
return orderbook
def handle_order_book(self, client, message):
#
# initial snapshot is fetched with ccxt's fetchOrderBook
# the feed does not include a snapshot, just the deltas
#
# {
# "e": "depthUpdate", # Event type
# "E": 1577554482280, # Event time
# "s": "BNBBTC", # Symbol
# "U": 157, # First update ID in event
# "u": 160, # Final update ID in event
# "b": [ # bids
# ["0.0024", "10"], # price, size
# ],
# "a": [ # asks
# ["0.0026", "100"], # price, size
# ]
# }
#
marketId = self.safe_string(message, 's')
market = self.safe_market(marketId)
symbol = market['symbol']
name = 'depth'
messageHash = market['lowercaseId'] + '@' + name
orderbook = self.safe_value(self.orderbooks, symbol)
if orderbook is None:
#
# https://github.com/ccxt/ccxt/issues/6672
#
# Sometimes Binance sends the first delta before the subscription
# confirmation arrives. At that point the orderbook is not
# initialized yet and the snapshot has not been requested yet
# therefore it is safe to drop these premature messages.
#
return
nonce = self.safe_integer(orderbook, 'nonce')
if nonce is None:
# 2. Buffer the events you receive from the stream.
orderbook.cache.append(message)
else:
try:
U = self.safe_integer(message, 'U')
u = self.safe_integer(message, 'u')
pu = self.safe_integer(message, 'pu')
if pu is None:
# spot
# 4. Drop any event where u is <= lastUpdateId in the snapshot
if u > orderbook['nonce']:
timestamp = self.safe_integer(orderbook, 'timestamp')
conditional = None
if timestamp is None:
# 5. The first processed event should have U <= lastUpdateId+1 AND u >= lastUpdateId+1
conditional = ((U - 1) <= orderbook['nonce']) and ((u - 1) >= orderbook['nonce'])
#!/usr/bin/env python
from __future__ import division
import sys
import os
import re
gen_dir = os.path.dirname(os.path.abspath(__file__))
csv_dir = os.path.join(gen_dir, "csv")
tmpl_dir = os.path.join(gen_dir, "templates")
dest_dir = os.path.abspath(os.path.join(gen_dir, "..", "src", "generated"))
class Template(object):
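    # Each template file is split into named sections: a line starting with '@'
    # begins a new section (the rest of the line is its name), and the lines up
    # to the next '@' line form the section body, which is stored as an
    # attribute of the same name on this object.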
def __init__(self, filename):
with open(os.path.join(tmpl_dir, filename), 'r') as f:
name = None
value = None
for line in f:
if line[0] == '@':
if name is not None:
setattr(self, name, value)
name = line[1:].rstrip('\r\n')
value = ''
else:
value += line
if name is not None:
setattr(self, name, value)
copy = Template('copyright.tmpl')
reader = Template('reader.tmpl')
ctor = Template('constructor.tmpl')
decl = Template('declaration.tmpl')
decl2 = Template('declaration.tmpl')
chunk = Template('chunks.tmpl')
freader = Template('flag_reader.tmpl')
decl2.enum_header = decl.enum2_header
decl2.enum_tmpl = decl.enum2_tmpl
decl2.enum_footer = decl.enum2_footer
cpp_types = {
'Boolean': 'bool',
'Double': 'double',
'Integer': 'int',
'UInt8': 'uint8_t',
'UInt16': 'uint16_t',
'UInt32': 'uint32_t',
'Int16': 'int16_t',
'String': 'std::string',
}
def flags_def(struct_name):
f = ['\t\t\tbool %s;\n' % name for name in flags[struct_name]]
return 'struct Flags {\n' + ''.join(f) + '\t\t}'
def cpp_type(ty, prefix = True, expand_flags = None):
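    # Maps a field type from the CSV files to its C++ type, e.g. (with
    # illustrative type names): cpp_type('Array<Item:Item>') -> 'std::vector<RPG::Item>',
    # cpp_type('Enum<State>') -> 'int', cpp_type('UInt16') -> 'uint16_t'.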
if ty in cpp_types:
return cpp_types[ty]
m = re.match(r'Array<(.*):(.*)>', ty)
if m:
return 'std::vector<%s>' % cpp_type(m.group(1), prefix, expand_flags)
m = re.match(r'(Vector|Array)<(.*)>', ty)
if m:
return 'std::vector<%s>' % cpp_type(m.group(2), prefix, expand_flags)
m = re.match(r'Ref<(.*):(.*)>', ty)
if m:
return cpp_type(m.group(2), prefix, expand_flags)
m = re.match(r'Ref<(.*)>', ty)
if m:
return 'int'
m = re.match(r'Enum<(.*)>', ty)
if m:
return 'int'
m = re.match(r'(.*)_Flags$', ty)
if m:
if expand_flags:
return flags_def(expand_flags)
else:
ty = m.expand(r'\1::Flags')
if prefix:
ty = 'RPG::' + ty
return ty
if prefix:
ty = 'RPG::' + ty
return ty
int_types = {
'UInt8': 'uint8_t',
'UInt16': 'uint16_t',
'UInt32': 'uint32_t',
'Int16': 'int16_t',
}
def struct_headers(ty, header_map):
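    # Returns the headers needed by a field type, e.g. with an empty header_map:
    # struct_headers('Array<EventCommand>', {}) -> ['<vector>', '"rpg_eventcommand.h"']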
if ty == 'String':
return ['<string>']
if ty in int_types:
return ['"reader_types.h"']
if ty in cpp_types:
return []
m = re.match(r'Ref<(.*):(.*)>', ty)
if m:
return struct_headers(m.group(2), header_map)
if re.match(r'Ref<(.*)>', ty):
return []
if re.match(r'Enum<(.*)>', ty):
return []
if re.match(r'(.*)_Flags$', ty):
return []
m = re.match(r'Array<(.*):(.*)>', ty)
if m:
return ['<vector>'] + struct_headers(m.group(1), header_map)
m = re.match(r'(Vector|Array)<(.*)>', ty)
if m:
return ['<vector>'] + struct_headers(m.group(2), header_map)
header = header_map.get(ty)
if header is not None:
return ['"rpg_%s.h"' % header]
if ty in ['Parameters', 'Equipment', 'EventCommand', 'MoveCommand', 'Rect', 'TreeMap']:
return ['"rpg_%s.h"' % ty.lower()]
return []
def get_structs(filename = 'structs.csv'):
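    # Each non-comment line of structs.csv is "filetype,StructName,hasid";
    # a hypothetical row "ldb,Actor,1" would yield ('ldb', 'actor', 'Actor', True).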
result = []
with open(os.path.join(csv_dir, filename), 'r') as f:
for line in f:
sline = line.strip()
if not sline:
continue
if sline.startswith("#"):
continue
data = sline.split(',')
filetype, structname, hasid = data
hasid = bool(int(hasid)) if hasid else None
filename = structname.lower()
result.append((filetype, filename, structname, hasid))
return result
def get_fields(filename = 'fields.csv'):
result = {}
with open(os.path.join(csv_dir, filename), 'r') as f:
for line in f:
sline = line.strip()
if not sline:
continue
if sline.startswith("#"):
continue
data = sline.split(',', 6)
struct, fname, issize, ftype, code, dfl, comment = data
issize = issize.lower() == 't'
code = int(code, 16) if code else None
if struct not in result:
result[struct] = []
result[struct].append((fname, issize, ftype, code, dfl, comment))
return result
def get_enums(filename = 'enums.csv'):
enums = {}
fields = {}
with open(os.path.join(csv_dir, filename), 'r') as f:
for line in f:
sline = line.strip()
if not sline:
continue
if sline.startswith("#"):
continue
data = sline.split(',')
sname, ename, name, num = data
num = int(num)
if (sname, ename) not in fields:
if sname not in enums:
enums[sname] = []
enums[sname].append(ename)
fields[sname, ename] = []
fields[sname, ename].append((name, num))
return enums, fields
def get_flags(filename = 'flags.csv'):
result = {}
with open(os.path.join(csv_dir, filename), 'r') as f:
for line in f:
sline = line.strip()
if not sline:
continue
if sline.startswith("#"):
continue
data = sline.split(',')
struct, fname = data
if struct not in result:
result[struct] = []
result[struct].append(fname)
return result
def get_setup(filename = 'setup.csv'):
result = {}
with open(os.path.join(csv_dir, filename), 'r') as f:
for line in f:
sline = line.strip()
if not sline:
continue
if sline.startswith("#"):
continue
data = sline.split(',')
struct, method, headers = data
headers = headers.split(' ') if headers else []
if struct not in result:
result[struct] = []
result[struct].append((method, headers))
return result
def get_headers(structs, sfields, setup):
header_map = dict([(struct_name, filename)
for filetype, filename, struct_name, hasid in structs])
result = {}
for filetype, filename, struct_name, hasid in structs:
if struct_name not in sfields:
continue
headers = set()
for field in sfields[struct_name]:
fname, issize, ftype, code, dfl, comment = field
if not ftype:
continue
headers.update(struct_headers(ftype, header_map))
if struct_name in setup:
for method, hdrs in setup[struct_name]:
headers.update(hdrs)
result[struct_name] = sorted(x for x in headers if x[0] == '<') + sorted(x for x in headers if x[0] == '"')
return result
def write_enums(sname, f):
for ename in enums[sname]:
dcl = decl2 if (sname, ename) in [('MoveCommand','Code'),('EventCommand','Code')] else decl
evars = dict(ename = ename)
f.write(dcl.enum_header % evars)
ef = efields[sname, ename]
n = len(ef)
for i, (name, num) in enumerate(ef):
comma = '' if i == n - 1 else ','
vars = dict(ename = ename,
name = name,
num = num,
comma = comma)
f.write(dcl.enum_tmpl % vars)
f.write(dcl.enum_footer % evars)
f.write('\n')
def write_setup(sname, f):
for method, headers in setup[sname]:
f.write('\t\t%s;\n' % method)
def generate_reader(f, struct_name, vars):
f.write(copy.header)
f.write(reader.header % vars)
for field in sfields[struct_name]:
fname, issize, ftype, code, dfl, comment = field
if not ftype:
continue
fvars = dict(
ftype = cpp_type(ftype),
fname = fname)
if issize:
f.write(reader.size_tmpl % fvars)
else:
f.write(reader.typed_tmpl % fvars)
f.write(reader.footer % vars)
def write_flags(f, sname, fname):
for name in flags[sname]:
fvars = dict(
fname = fname,
name = name)
f.write(ctor.flags % fvars)
def generate_ctor(f, struct_name, hasid, vars):
f.write(copy.header)
f.write(ctor.header % vars)
if hasid:
f.write(ctor.tmpl % dict(fname = 'ID', default = '0'))
for field in sfields[struct_name]:
fname, issize, ftype, code, dfl, comment = field
if not ftype:
continue
if issize:
continue
if ftype.endswith('_Flags'):
write_flags(f, struct_name, fname)
continue
if dfl == '':
continue
if ftype.startswith('Vector'):
continue
if ftype.startswith('Array'):
continue
if ftype == 'Boolean':
dfl = dfl.lower()
elif ftype == 'String':
dfl = '"' + dfl[1:-1] + '"'
if '|' in dfl:
# dfl = re.sub(r'(.*)\|(.*)', r'\1', dfl)
dfl = -1
fvars = dict(
fname = fname,
default = dfl)
f.write(ctor.tmpl % fvars)
if struct_name in setup and any('Init()' in method
for method, hdrs in setup[struct_name]):
f.write('\n\tInit();\n')
f.write(ctor.footer % vars)
def needs_ctor(struct_name, hasid):
if hasid:
return True
for field in sfields[struct_name]:
fname, issize, ftype, code, dfl, comment = field
if not ftype:
continue
if issize:
continue
if ftype.endswith('_Flags'):
return True
if dfl != '':
return True
return False
def generate_header(f, struct_name, hasid, vars):
f.write(copy.header)
f.write(decl.header1 % vars)
if headers[struct_name]:
f.write(decl.header2)
for header in headers[struct_name]:
f.write(decl.header_tmpl % dict(header = header))
f.write(decl.header3 % vars)
if struct_name in enums:
write_enums(struct_name, f)
needs_blank = False
if needs_ctor(struct_name, hasid):
f.write(decl.ctor % vars)
needs_blank = True
if struct_name in setup:
write_setup(struct_name, f)
needs_blank = True
if needs_blank:
f.write('\n')
if hasid:
f.write(decl.tmpl % dict(ftype = 'int', fname = 'ID'))
for field in sfields[struct_name]:
fname, issize, ftype, code, dfl, comment = field
if not ftype:
continue
if issize:
continue
fvars = dict(
ftype = cpp_type(ftype, False, struct_name),
fname = fname)
f.write(decl.tmpl % fvars)
f.write(decl.footer % vars)
def generate_chunks(f, struct_name, vars):
f.write(chunk.header % vars)
mwidth = max(len(field[0] + ('_size' if field[1] else '')) for field in sfields[struct_name]) + 1
mwidth = (mwidth + 3) // 4 * 4
# print struct_name, mwidth
sf = sfields[struct_name]
n = len(sf)
for i, field in enumerate(sf):
fname, issize, ftype, code, dfl, comment = field
if issize:
fname += '_size'
pad = mwidth - len(fname)
ntabs = (pad + 3) // 4
tabs = '\t' * ntabs
comma = ' ' if i == n - 1 else ','
fvars = dict(
fname = fname,
tabs = tabs,
code = code,
comma = comma,
comment = comment)
f.write(chunk.tmpl % fvars)
f.write(chunk.footer % vars)
def generate_struct(filetype, filename, struct_name, hasid):
if struct_name not in sfields:
return
vars = dict(
filetype = filetype,
filename = filename,
typeupper = filetype.upper(),
structname = struct_name,
structupper = struct_name.upper(),
idtype = ['NoID','WithID'][hasid])
filepath = os.path.join(dest_dir, '%s_%s.cpp' % (filetype, filename))
with open(filepath, 'w') as f:
generate_reader(f, struct_name, vars)
if needs_ctor(struct_name, hasid):
filepath = os.path.join(dest_dir, 'rpg_%s.cpp' % filename)
with open(filepath, 'w') as f:
generate_ctor(f, struct_name, hasid, vars)
filepath = os.path.join(dest_dir, 'rpg_%s.h' % filename)
with open(filepath, 'w') as f:
generate_header(f, struct_name, hasid, vars)
    filepath = os.path.join(dest_dir, '%s_chunks.h'
"""
This module converts between a nested structure (e.g. a list of lists of lists of ... of values) and a (nest pattern, flat list) tuple.
The nest pattern is a string consisting solely of square brackets and digits.
A number represents the number of consecutive values at the current level of the nested structure.
An opening square bracket represents entering a deeper level of nesting.
A closing square bracket represents leaving a deeper level of nesting.
The square brackets at the top level (which would be the first and last characters of every pattern) are omitted.
"""
import re
from collections import deque
from enum import Enum
class NestDirective(Enum):
DFS_PUSH=1
DFS_POP=2
BFS_QUEUE=3
BFS_SERVE=4
directive_token_map = {
NestDirective.DFS_PUSH: '[',
NestDirective.DFS_POP: ']',
NestDirective.BFS_QUEUE: '*',
NestDirective.BFS_SERVE: '|',
}
#invert the directive_token_map dict
token_directive_map = {token:directive for directive,token in directive_token_map.items()}
def dfs(nested_structure,include_nest_directives=False,yield_condition=None,get_children_func=None):
"""
Implements a depth-first-search traversal of a nested list structure
include_nest_directives will yield instances of NestDirective.DFS_PUSH and NestDirective.DFS_POP when going to and from a deeper level
yield_condition is a function that accepts an item and returns True if the item should be yielded otherwise False
The default yield_condition returns True iff the item is not a list or tuple.
get_children_func is a function that accepts an item and returns a list of children items to iterate through or None if no iteration through children should be performed
The default get_children_func returns the item itself if it is a list or a tuple, otherwise None
If an item is to be yielded and has children, then it is yielded before its children are processed.
>>> list(dfs([1,[2,3,[4,5,[6,7],8,9],10,11],12]))
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
>>> list(dfs([1,[2],[3]]))
[1, 2, 3]
>>> list(dfs([1,[2],[3],4]))
[1, 2, 3, 4]
>>> list(dfs([]))
[]
"""
if yield_condition is None:
yield_condition = lambda item: not isinstance(item,(list,tuple))
if get_children_func is None:
get_children_func = lambda item: (item if isinstance(item,(list,tuple)) else None)
stack = deque([[nested_structure,0]])
while len(stack) > 0:
tree,pos = stack[-1]
pushed=False
while pos < len(tree):
item = tree[pos]
pos += 1
if yield_condition(item):
yield item
children = get_children_func(item)
if children is not None:
if include_nest_directives:
yield NestDirective.DFS_PUSH
stack[-1][1] = pos
stack.append([children,0])
pushed = True
break
if not pushed and pos == len(tree):
if len(stack) > 1 and include_nest_directives:
yield NestDirective.DFS_POP
stack.pop()
def bfs(nested_structure,include_nest_directives=False,yield_condition=None,get_children_func=None):
"""
Implements a breadth-first-search traversal of a nested list structure
include_nest_directives will yield instances of NestDirective.BFS_QUEUE and NestDirective.BFS_SERVE when encountering a subtree and when switching to process a new subtree
yield_condition and get_children_func have the same meaning as in the dfs function
>>> list(bfs([1,[2,3,[4,5,[6,7],8,9],10,11],12]))
[1, 12, 2, 3, 10, 11, 4, 5, 8, 9, 6, 7]
>>> list(bfs([1,[2],[3]]))
[1, 2, 3]
>>> list(bfs([1,[2],[3],4]))
[1, 4, 2, 3]
>>> list(bfs([]))
[]
"""
if yield_condition is None:
yield_condition = lambda item: not isinstance(item,(list,tuple))
if get_children_func is None:
get_children_func = lambda item: (item if isinstance(item,(list,tuple)) else None)
queue = deque([nested_structure])
while len(queue) > 0:
tree = queue.popleft()
for item in tree:
if yield_condition(item):
yield item
children = get_children_func(item)
if children is not None:
if include_nest_directives:
yield NestDirective.BFS_QUEUE
queue.append(item)
if len(queue) > 0 and include_nest_directives:
yield NestDirective.BFS_SERVE
def flatten(nested_structure,algorithm=dfs):
"""
Traverses the nested structure according to the algorithm.
Produces a structure pattern string and a flat list
    The structure pattern can be combined with the flat list to reconstruct the original nested structure using the deflatten function
The flat list corresponds to the order of traversal in the provided algorithm parameter
>>> flatten([1,[2,3,[4,5,[6,7],8,9],10,11],12])
('1[2[2[2]2]2]1', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
>>> flatten([1,[2,3,[4,5,[6,7],8,9],10,11],12],bfs)
('1*1|2*2|2*2|2', [1, 12, 2, 3, 10, 11, 4, 5, 8, 9, 6, 7])
"""
if algorithm not in [dfs,bfs]:
raise Exception('algorithm must be either the function dfs or the function bfs')
pattern_list = []
flat_list = []
consecutive_item_count = 0
for item in algorithm(nested_structure,include_nest_directives=True):
if isinstance(item,NestDirective):
if consecutive_item_count > 0:
pattern_list.append(str(consecutive_item_count))
consecutive_item_count = 0
pattern_list.append(directive_token_map[item])
else:
consecutive_item_count += 1
flat_list.append(item)
if consecutive_item_count > 0:
pattern_list.append(str(consecutive_item_count))
structure_pattern = ''.join(pattern_list)
return (structure_pattern,flat_list)
def parse_pattern(structure_pattern):
"""
Splits the structure pattern into integers and nest directive enum values
"""
return [(token_directive_map[token] if token in token_directive_map else int(token)) for token in re.split('(['+re.escape(''.join(directive_token_map.values()))+'])',structure_pattern) if token != '']
def deflatten(structure_pattern,flat_list):
"""
Given a structure pattern and a flat list, construct a nested list structure
>>> deflatten('1[2[2[2]2]2]1', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
[1, [2, 3, [4, 5, [6, 7], 8, 9], 10, 11], 12]
>>> deflatten('1*1|2*2|2*2|2', [1, 12, 2, 3, 10, 11, 4, 5, 8, 9, 6, 7])
[1, [2, 3, [4, 5, [6, 7], 8, 9], 10, 11], 12]
"""
structure_directives = parse_pattern(structure_pattern)
stackqueue = deque()
nested_structure = []
top_nested_structure = nested_structure
flat_position = 0
alg = None
for directive in structure_directives:
if directive is NestDirective.DFS_PUSH:
if alg is bfs:
raise Exception('Structure pattern contains both dfs and bfs tokens')
alg = dfs
stackqueue.append(nested_structure)
nested_structure = []
stackqueue[-1].append(nested_structure)
elif directive is NestDirective.DFS_POP:
if alg is bfs:
raise Exception('Structure pattern contains both dfs and bfs tokens')
alg = dfs
nested_structure = stackqueue.pop()
elif directive is NestDirective.BFS_QUEUE:
if alg is dfs:
raise Exception('Structure pattern contains both dfs and bfs tokens')
alg = bfs
subtree = []
stackqueue.append(subtree)
nested_structure.append(subtree)
elif directive is NestDirective.BFS_SERVE:
if alg is dfs:
raise Exception('Structure pattern contains both dfs and bfs tokens')
alg = bfs
nested_structure = stackqueue.popleft()
else:
#is a number -> consume that many items
if flat_position + directive <= len(flat_list):
nested_structure.extend(flat_list[flat_position:flat_position+directive])
flat_position += directive
else:
raise Exception('structure_pattern implies more values than flat_list contains')
if flat_position < len(flat_list):
raise Exception('flat_list has more data than structure_pattern implies')
if len(stackqueue) != 0:
raise Exception('Structure pattern contains imbalanced directive tokens')
return top_nested_structure
def get_nested_indices(structure_pattern,flat_index):
"""
Given a structure pattern and an index into the flat list, return the corresponding sequence of indices identifying the position in the nested structure.
A negative flat index works from the end of the flat list
>>> get_nested_indices('1[2[2[2]2]2]1',0)
[0]
>>> get_nested_indices('1[2[2[2]2]2]1',1)
[1, 0]
>>> get_nested_indices('1[2[2[2]2]2]1',2)
[1, 1]
>>> get_nested_indices('1[2[2[2]2]2]1',3)
[1, 2, 0]
>>> get_nested_indices('1[2[2[2]2]2]1',4)
[1, 2, 1]
>>> get_nested_indices('1[2[2[2]2]2]1',5)
[1, 2, 2, 0]
>>> get_nested_indices('1[2[2[2]2]2]1',6)
[1, 2, 2, 1]
>>> get_nested_indices('1[2[2[2]2]2]1',10)
[1, 4]
>>> get_nested_indices('1[2[2[2]2]2]1',11)
[2]
"""
nest_indices = [0]
current_flat_index = 0
nest_queue = deque()
structure_directives = parse_pattern(structure_pattern)
alg = None
if flat_index < 0:
flat_index += sum(item for item in structure_directives if isinstance(item,int))
for directive in structure_directives:
if directive is NestDirective.DFS_PUSH:
if alg is bfs:
raise Exception('Structure pattern contains both dfs and bfs tokens')
alg = dfs
nest_indices.append(0)
elif directive is NestDirective.DFS_POP:
if alg is bfs:
raise Exception('Structure pattern contains both dfs and bfs tokens')
alg = dfs
nest_indices.pop()
nest_indices[-1] += 1
elif directive is NestDirective.BFS_QUEUE:
if alg is dfs:
raise Exception('Structure pattern contains both dfs and bfs tokens')
alg = bfs
nest_queue.append(nest_indices[:])
nest_indices[-1] += 1
elif directive is NestDirective.BFS_SERVE:
if alg is dfs:
raise Exception('Structure pattern contains both dfs and bfs tokens')
nest_indices = nest_queue.popleft()
nest_indices.append(0)
else:
#is a number
if current_flat_index <= flat_index < (current_flat_index + directive):
nest_indices[-1] += (flat_index-current_flat_index)
return nest_indices
else:
current_flat_index += directive
nest_indices[-1] += directive
raise Exception('flat index exceeds size implied by structure pattern')
def get_flat_index(structure_pattern,nest_indices):
"""
Given a structure pattern and a sequence of indices into the nested structure, return the corresponding flat list index
>>> get_flat_index('1[2[2[2]2]2]1',[0])
0
>>> get_flat_index('1[2[2[2]2]2]1',[1,0])
1
>>> get_flat_index('1[2[2[2]2]2]1',[1,1])
2
>>> get_flat_index('1[2[2[2]2]2]1',[1,2,0])
3
>>> get_flat_index('1[2[2[2]2]2]1',[1,2,1])
4
>>> get_flat_index('1[2[2[2]2]2]1',[1,2,2,0])
5
>>> get_flat_index('1[2[2[2]2]2]1',[1,2,2,1])
6
>>> get_flat_index('1[2[2[2]2]2]1',[1,4])
10
>>> get_flat_index('1[2[2[2]2]2]1',[2])
11
"""
current_nest_indices = [0]
flat_index = 0
nest_queue = deque()
structure_directives = parse_pattern(structure_pattern)
alg = None
for directive in structure_directives:
if directive is NestDirective.DFS_PUSH:
if alg is bfs:
raise Exception('Structure pattern contains both dfs and bfs tokens')
alg = dfs
current_nest_indices.append(0)
elif directive is NestDirective.DFS_POP:
if alg is bfs:
raise Exception('Structure pattern contains both dfs and bfs tokens')
alg = dfs
current_nest_indices.pop()
current_nest_indices[-1] += 1
elif directive is NestDirective.BFS_QUEUE:
if alg is dfs:
raise Exception('Structure pattern contains both dfs and bfs tokens')
alg = bfs
nest_queue.append(current_nest_indices[:])
#!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# analytics_systest.py
#
# System tests for analytics
#
import sys
builddir = sys.path[0] + '/../..'
import threading
threading._DummyThread._Thread__stop = lambda x: 42
import signal
import gevent
from gevent import monkey
monkey.patch_all()
import os
import unittest
import testtools
import fixtures
import socket
from utils.analytics_fixture import AnalyticsFixture
from utils.generator_fixture import GeneratorFixture
from mockcassandra import mockcassandra
import logging
import time
import pycassa
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
from opserver.sandesh.viz.constants import *
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
class AnalyticsTest(testtools.TestCase, fixtures.TestWithFixtures):
@classmethod
def setUpClass(cls):
if AnalyticsTest._check_skip_test() is True:
return
if (os.getenv('LD_LIBRARY_PATH', '').find('build/lib') < 0):
if (os.getenv('DYLD_LIBRARY_PATH', '').find('build/lib') < 0):
assert(False)
cls.cassandra_port = AnalyticsTest.get_free_port()
mockcassandra.start_cassandra(cls.cassandra_port)
@classmethod
def tearDownClass(cls):
if AnalyticsTest._check_skip_test() is True:
return
mockcassandra.stop_cassandra(cls.cassandra_port)
pass
def _update_analytics_start_time(self, start_time):
pool = ConnectionPool(COLLECTOR_KEYSPACE, ['127.0.0.1:%s'
% (self.__class__.cassandra_port)])
col_family = ColumnFamily(pool, SYSTEM_OBJECT_TABLE)
col_family.insert(SYSTEM_OBJECT_ANALYTICS,
{SYSTEM_OBJECT_START_TIME: start_time})
# end _update_analytics_start_time
    #@unittest.skip('Skipping non-cassandra test with vizd')
def test_00_nocassandra(self):
'''
This test starts redis,vizd,opserver and qed
Then it checks that the collector UVE (via redis)
can be accessed from opserver.
'''
logging.info("*** test_00_nocassandra ***")
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging,
builddir, 0))
assert vizd_obj.verify_on_setup()
return True
# end test_00_nocassandra
    #@unittest.skip('Skipping cassandra test with vizd')
def test_01_startup(self):
'''
This test starts redis,vizd,opserver and qed
It uses the test class' cassandra instance
Then it checks that the collector UVE (via redis)
and syslog (via cassandra) can be accessed from
opserver.
'''
logging.info("*** test_01_startup ***")
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging,
builddir,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
return True
    #@unittest.skip('Query query engine logs to test QE')
def test_02_message_table_query(self):
'''
This test starts redis,vizd,opserver and qed
It uses the test class' cassandra instance
Then it checks that the collector UVE (via redis)
and syslog (via cassandra) can be accessed from
opserver.
'''
logging.info("*** test_02_message_table_query ***")
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging,
builddir,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
assert vizd_obj.verify_message_table_moduleid()
assert vizd_obj.verify_message_table_select_uint_type()
assert vizd_obj.verify_message_table_messagetype()
assert vizd_obj.verify_message_table_where_or()
assert vizd_obj.verify_message_table_where_and()
assert vizd_obj.verify_message_table_filter()
assert vizd_obj.verify_message_table_filter2()
assert vizd_obj.verify_message_table_sort()
return True
# end test_02_message_table_query
    #@unittest.skip('Skipping VM UVE test')
def test_03_vm_uve(self):
'''
This test starts redis, vizd, opserver, qed, and a python generator
that simulates vrouter and sends UveVirtualMachineAgentTrace messages.
It uses the test class' cassandra instance. Then it checks that the
VM UVE (via redis) can be accessed from opserver.
'''
logging.info("*** test_03_vm_uve ***")
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging,
builddir, 0))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port()))
assert generator_obj.verify_on_setup()
generator_obj.send_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
assert generator_obj.verify_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
# Delete the VM UVE and verify that the deleted flag is set
# in the UVE cache
generator_obj.delete_vm_uve('abcd')
assert generator_obj.verify_vm_uve_cache(vm_id='abcd', delete=True)
# Add the VM UVE with the same vm_id and verify that the deleted flag
# is cleared in the UVE cache
generator_obj.send_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
assert generator_obj.verify_vm_uve_cache(vm_id='abcd')
assert generator_obj.verify_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
# Generate VM with vm_id containing XML control character
generator_obj.send_vm_uve(vm_id='<abcd&>', num_vm_ifs=2, msg_count=2)
assert generator_obj.verify_vm_uve(vm_id='<abcd&>', num_vm_ifs=2,
msg_count=2)
return True
# end test_03_vm_uve
    #@unittest.skip('Send/query flow stats to test QE')
def test_04_flow_query(self):
'''
This test starts redis,vizd,opserver and qed
It uses the test class' cassandra instance
Then it sends flow stats to the collector
and checks if flow stats can be accessed from
QE.
'''
logging.info("*** test_04_flow_query ***")
if AnalyticsTest._check_skip_test() is True:
return True
# set the start time in analytics db 1 hour earlier than
# the current time. For flow series test, we need to create
# flow samples older than the current time.
start_time = UTCTimestampUsec() - 3600 * 1000 * 1000
self._update_analytics_start_time(start_time)
vizd_obj = self.useFixture(
AnalyticsFixture(logging,
builddir,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port(),
start_time))
assert generator_obj.verify_on_setup()
generator_obj.generate_flow_samples()
generator_obj1 = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port(),
start_time, hostname=socket.gethostname() + "dup"))
assert generator_obj1.verify_on_setup()
generator_obj1.generate_flow_samples()
generator_object = [generator_obj, generator_obj1]
for obj in generator_object:
assert vizd_obj.verify_flow_samples(obj)
assert vizd_obj.verify_flow_table(generator_obj)
assert vizd_obj.verify_flow_series_aggregation_binning(generator_object)
return True
# end test_04_flow_query
    #@unittest.skip('Skipping contrail-collector HA test')
def test_05_collector_ha(self):
logging.info('*** test_05_collector_ha ***')
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging,
builddir,
self.__class__.cassandra_port,
collector_ha_test=True))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
# contrail-analytics-api and contrail-query-engine are started with collectors[0] as
# primary and collectors[1] as secondary
exp_genlist = ['contrail-collector', 'contrail-analytics-api', 'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# start the contrail-vrouter-agent with collectors[1] as primary and
# collectors[0] as secondary
collectors = [vizd_obj.collectors[1].get_addr(),
vizd_obj.collectors[0].get_addr()]
vr_agent = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port()))
assert vr_agent.verify_on_setup()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
exp_genlist)
# stop collectors[0] and verify that contrail-analytics-api and QE switch
# from primary to secondary collector
vizd_obj.collectors[0].stop()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent', 'contrail-analytics-api', 'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
exp_genlist)
# start collectors[0]
vizd_obj.collectors[0].start()
exp_genlist = ['contrail-collector']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# verify that the old UVEs are flushed from redis when collector restarts
exp_genlist = [vizd_obj.collectors[0].get_generator_id()]
assert vizd_obj.verify_generator_list_in_redis(\
vizd_obj.collectors[0].get_redis_uve(),
exp_genlist)
# stop collectors[1] and verify that contrail-analytics-api and QE switch
# from secondary to primary and contrail-vrouter-agent from primary to
# secondary
vizd_obj.collectors[1].stop()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent', 'contrail-analytics-api', 'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# verify the generator list in redis
exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
vr_agent.get_generator_id(),
vizd_obj.opserver.get_generator_id(),
vizd_obj.query_engine.get_generator_id()]
assert vizd_obj.verify_generator_list_in_redis(\
vizd_obj.collectors[0].get_redis_uve(),
exp_genlist)
# stop Opserver and QE
vizd_obj.opserver.stop()
vizd_obj.query_engine.stop()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# verify the generator list in redis
exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
vr_agent.get_generator_id()]
assert vizd_obj.verify_generator_list_in_redis(\
vizd_obj.collectors[0].get_redis_uve(),
exp_genlist)
# start Opserver and QE with collectors[1] as the primary and
# collectors[0] as the secondary. On generator startup, verify
# that it connects to the secondary collector, if the
# connection to the primary fails
vizd_obj.opserver.set_primary_collector(
vizd_obj.collectors[1].get_addr())
vizd_obj.opserver.set_secondary_collector(
vizd_obj.collectors[0].get_addr())
vizd_obj.opserver.start()
vizd_obj.query_engine.set_primary_collector(
vizd_obj.collectors[1].get_addr())
vizd_obj.query_engine.set_secondary_collector(
vizd_obj.collectors[0].get_addr())
vizd_obj.query_engine.start()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent', 'contrail-analytics-api', 'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# stop the collectors[0] - both collectors[0] and collectors[1] are down
# send the VM UVE and verify that the VM UVE is synced after connection
# to the collector
vizd_obj.collectors[0].stop()
# Make sure the connection to the collector is teared down before
# sending the VM UVE
while True:
if vr_agent.verify_on_setup() is False:
break
vr_agent.send_vm_uve(vm_id='abcd-1234-efgh-5678',
num_vm_ifs=5, msg_count=5)
vizd_obj.collectors[1].start()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent', 'contrail-analytics-api', 'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
exp_genlist)
assert vr_agent.verify_vm_uve(vm_id='abcd-1234-efgh-5678',
num_vm_ifs=5, msg_count=5)
# end test_05_collector_ha
    #@unittest.skip('InterVN stats using StatsOracle')
def test_06_intervn_query(self):
'''
This test starts redis,vizd,opserver and qed
It uses the test class' cassandra instance
Then it sends intervn stats to the collector
and checks if intervn stats can be accessed from
QE.
'''
logging.info("*** test_06_intervn_query ***")
if AnalyticsTest._check_skip_test() is True:
return True
# set the start time in analytics db 1 hour earlier than
# the current time. For flow series test, we need to create
# flow samples older than the current time.
start_time = UTCTimestampUsec() - 3600 * 1000 * 1000
self._update_analytics_start_time(start_time)
vizd_obj = self.useFixture(
AnalyticsFixture(logging,
builddir,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port(),
start_time))
assert generator_obj.verify_on_setup()
logging.info("Starting intervn gen " + str(UTCTimestampUsec()))
generator_obj.generate_intervn()
logging.info("Ending intervn gen " + str(UTCTimestampUsec()))
assert vizd_obj.verify_intervn_all(generator_obj)
assert vizd_obj.verify_intervn_sum(generator_obj)
return True
# end test_06_intervn_query
    #@unittest.skip(' Messagetype and Objecttype queries')
def test_07_fieldname_query(self):
'''
This test starts redis,vizd,opserver and qed
It uses the test class' cassandra instance
It then queries the stats table for messagetypes
and objecttypes
'''
logging.info("*** test_07_fieldname_query ***")
start_time = UTCTimestampUsec() - 3600 * 1000 * 1000
self._update_analytics_start_time(start_time)
vizd_obj = self.useFixture(
AnalyticsFixture(logging,
builddir,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
        assert vizd_obj.verify_fieldname_messagetype()
        assert vizd_obj.verify_fieldname_objecttype()
        return True
#end test_07_fieldname_query
    #@unittest.skip('verify send-tracebuffer')
def test_08_send_tracebuffer(self):
'''
This test verifies /analytics/send-tracebuffer/ REST API.
Opserver publishes the request to send trace buffer to all
the redis-uve instances. Collector forwards the request to
the appropriate generator(s). Generator sends the tracebuffer
to the Collector which then dumps the trace messages in the
analytics db. Verify that the trace messages are written in
the analytics db.
'''
logging.info('*** test_08_send_tracebuffer ***')
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir,
self.__class__.cassandra_port,
collector_ha_test=True))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
# Make sure the contrail-collector is connected to the redis-uve before
        # sending the trace buffer request
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Contains functionality for managing conda environment dependencies.
Use the :class:`azureml.core.conda_dependencies.CondaDependencies` class to load existing conda environment
files and configure and manage new environments where experiments execute.
"""
import os
import ruamel.yaml
import re
from azureml._base_sdk_common.common import get_run_config_dir_name
from azureml._base_sdk_common.common import normalize_windows_paths
from azureml._base_sdk_common import __version__ as VERSION
from pkg_resources import resource_stream
from azureml.exceptions import UserErrorException
from io import StringIO
BASE_PROJECT_MODULE = 'azureml._project'
BASE_PROJECT_FILE_RELATIVE_PATH = 'base_project_files/conda_dependencies.yml'
DEFAULT_SDK_ORIGIN = 'https://pypi.python.org/simple'
CONDA_FILE_NAME = 'auto_conda_dependencies.yml'
CHANNELS = 'channels'
PACKAGES = 'dependencies'
PIP = 'pip'
PYTHON_PREFIX = 'python'
VERSION_REGEX = re.compile(r'(\d+)\.(\d+)(\.(\d+))?([ab](\d+))?$')
CNTK_DEFAULT_VERSION = '2.5.1'
PYTHON_DEFAULT_VERSION = '3.6.2'
LINUX_PLATFORM = 'linux'
WINDOWS_PLATFORM = 'win32'
TENSORFLOW_DEFAULT_VERSION = '1.13.1'
PYTORCH_DEFAULT_VERSION = '1.0.0'
TORCHVISION_DEFAULT_VERSION = '0.2.1'
HOROVOD_DEFAULT_VERSION = '0.15.2'
DEFAULT_CHAINER_VERSION = "5.1.0"
CUPY_DEFAULT_VERSION = "cupy-cuda90"
CPU = 'cpu'
GPU = 'gpu'
CNTK_PACKAGE_PREFIX = 'cntk'
TENSORFLOW_PACKAGE_PREFIX = 'tensorflow'
PYTORCH_PACKAGE_PREFIX = 'torch'
TORCHVISION_PACKAGE_PREFIX = 'torchvision'
CHAINER_PACKAGE_PREFIX = "chainer"
INVALID_PATHON_MESSAGE = "Invalid python version {0}, " + \
                         "only accept '3.5' and '3.6'"
class CondaDependencies(object):
"""Manages application dependencies in an Azure Machine Learning environment.
.. note::
If no parameters are specified, `azureml-defaults` is added as the only pip dependency.
If the ``conda_dependencies_file_path`` parameter is not specified, then
the CondaDependencies object contains only the Azure Machine Learning packages (`azureml-defaults`).
The `azureml-defaults` dependency will not be pinned to a specific version and will
target the latest version available on PyPi.
.. remarks::
You can load an existing conda environment file or choose to configure and manage
the application dependencies in memory. During experiment submission, a preparation step is executed
which creates and caches a conda environment within which the experiment executes.
If your dependency is available through both Conda and pip (from PyPi),
use the Conda version, as Conda packages typically come with pre-built binaries that make
installation more reliable. For more information, see `Understanding Conda
and Pip <https://www.anaconda.com/understanding-conda-and-pip/>`_.
See the repository https://github.com/Azure/AzureML-Containers for details on base image dependencies.
The following example shows how to add a package using the
:meth:`azureml.core.conda_dependencies.CondaDependencies.add_conda_package`.
.. code-block:: python
from azureml.core.environment import CondaDependencies
myenv = Environment(name="myenv")
conda_dep = CondaDependencies()
conda_dep.add_conda_package("scikit-learn")
Full sample is available from
https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/using-environments/using-environments.ipynb
A pip package can also be added and the dependencies set in the :class:`azureml.core.Environment` object.
.. code-block:: python
conda_dep.add_pip_package("pillow==5.4.1")
myenv.python.conda_dependencies=conda_dep
Full sample is available from
https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/using-environments/using-environments.ipynb
:param conda_dependencies_file_path: A local path to a conda configuration file. Using this parameter
allows for loading and editing of an existing Conda environment file.
:type conda_dependencies_file_path: str
"""
DEFAULT_NUMBER_OF_CONDA_PACKAGES = 0
DEFAULT_NUMBER_OF_PIP_PACKAGES = 0
_VALID_YML_KEYS = ['name', 'channels', 'dependencies', 'prefix']
@staticmethod
def _validate_yaml(ruamel_yaml_object):
if not isinstance(ruamel_yaml_object, dict):
raise UserErrorException("Environment error: not a valid YAML structure")
for key in ruamel_yaml_object.keys():
if not str(key) in CondaDependencies._VALID_YML_KEYS:
msg = "Environment error: unknown {} key in environment specification".format(str(key))
raise UserErrorException(msg)
def __init__(self, conda_dependencies_file_path=None, _underlying_structure=None):
"""Initialize a new object to manage dependencies."""
if conda_dependencies_file_path:
with open(conda_dependencies_file_path, "r") as input:
self._conda_dependencies = ruamel.yaml.round_trip_load(input)
elif _underlying_structure:
self._conda_dependencies = _underlying_structure
else:
with resource_stream(
BASE_PROJECT_MODULE,
BASE_PROJECT_FILE_RELATIVE_PATH
) as base_stream:
self._conda_dependencies = ruamel.yaml.round_trip_load(base_stream)
base_stream.close()
CondaDependencies._validate_yaml(self._conda_dependencies)
self._python_version = self.get_python_version()
@staticmethod
def create(pip_indexurl=None, pip_packages=None, conda_packages=None,
python_version=PYTHON_DEFAULT_VERSION, pin_sdk_version=True):
r"""Initialize a new CondaDependencies object.
Returns an instance of a CondaDependencies object with user specified dependencies.
.. note::
If `pip_packages` is not specified, `azureml-defaults` will be added as the default dependencies. User \
specified `pip_packages` dependencies will override the default values.
If `pin_sdk_version` is set to true, pip dependencies of the packages distributed as a part of Azure \
Machine Learning Python SDK will be pinned to the SDK version installed in the current environment.
:param pip_indexurl: The pip index URL. If not specified, the SDK origin index URL will be used.
:type pip_indexurl: str
:param pip_packages: A list of pip packages.
:type pip_packages: builtin.list[str]
:param conda_packages: A list of conda packages.
:type conda_packages: builtin.list[str]
:param python_version: The Python version.
:type python_version: str
:param pin_sdk_version: Indicates whether to pin SDK packages to the client version.
:type pin_sdk_version: bool
:return: A conda dependency object.
:rtype: azureml.core.conda_dependencies.CondaDependencies
"""
cd = CondaDependencies()
_sdk_origin_url = CondaDependencies.sdk_origin_url().rstrip('/')
# set index url to sdk origin pypi index if not specified
if pip_indexurl or _sdk_origin_url != DEFAULT_SDK_ORIGIN:
cd.set_pip_index_url(
"--index-url {}".format(pip_indexurl if pip_indexurl else _sdk_origin_url))
cd.set_pip_option("--extra-index-url {}".format(DEFAULT_SDK_ORIGIN))
cd.set_python_version(python_version)
if pip_packages is None:
pip_packages = ['azureml-defaults']
else:
# clear defaults if pip packages were specified
for package in cd.pip_packages:
cd.remove_pip_package(package)
scope = CondaDependencies._sdk_scope()
# adding specified pip packages
for package in pip_packages:
# pin current sdk version assuming all azureml-* are a part of sdk
if pin_sdk_version and cd._get_package_name(package) in scope:
cd.add_pip_package("{}~={}".format(cd._get_package_name_with_extras(package), VERSION))
else:
cd.add_pip_package(package)
if conda_packages:
# clear defaults if conda packages were specified
for conda_package in conda_packages:
cd.remove_conda_package(conda_package)
# adding specified conda packages
for conda_package in conda_packages:
cd.add_conda_package(conda_package)
return cd
@staticmethod
def merge_requirements(requirements):
"""Merge package requirements.
:param requirements: A list of packages requirements to merge.
:type requirements: builtin.list[str]
:return: A list of merged package requirements.
:rtype: builtin.list[str]
"""
packages = {}
for req in requirements:
package = CondaDependencies._get_package_name(req)
if packages.get(package, None):
packages[package].append(req[len(package):].strip())
else:
packages[package] = [req[len(package):].strip()]
newpackages = []
for pack, req in packages.items():
newpackages.append("{}{}".format(pack, ",".join([x for x in req if x])))
return newpackages
@staticmethod
def _sdk_scope():
"""Return list of SDK packages.
:return: list of SDK packages
:rtype: list
"""
from azureml._project.project_manager import _sdk_scope
return _sdk_scope()
@staticmethod
def sdk_origin_url():
"""Return the SDK origin index URL.
:return: Returns the SDK origin index URL.
:rtype: str
"""
from azureml._project.project_manager import _current_index
index = _current_index()
if index:
return index
else:
return "https://pypi.python.org/simple"
def _merge_dependencies(self, conda_dependencies):
if not conda_dependencies:
return
# merge channels, conda dependencies, pip packages
for channel in conda_dependencies.conda_channels:
self.add_channel(channel)
for package in conda_dependencies.conda_packages:
self.add_conda_package(package)
for package in conda_dependencies.pip_packages:
self.add_pip_package(package)
def set_pip_index_url(self, index_url):
"""Set pip index URL.
:param index_url: The pip index URL to use.
:type index_url: str
"""
self.set_pip_option(index_url)
@property
def conda_channels(self):
"""Return conda channels.
:return: Returns the channel dependencies. The returned dependencies are a copy, and any changes to the
returned channels won't update the conda channels in this object.
:rtype: iter
"""
conda_channels = []
# We are returning a copy because self._conda_dependencies[CHANNELS] is of CommentedSeq ruamel.yaml
# type, which extends from list. But, self._conda_dependencies[CHANNELS] contains the
# comments and list elements, so in case user removes some element then it would mess up the
# self._conda_dependencies[CHANNELS] and conda file completely.
# Any edits to self._conda_dependencies[CHANNELS] should be done using the provided public
# methods in this class.
if CHANNELS in self._conda_dependencies:
for ditem in self._conda_dependencies[CHANNELS]:
conda_channels.append(ditem)
return iter(conda_channels)
@property
def conda_packages(self):
"""Return conda packages.
:return: Returns the package dependencies. Returns a copy of conda packages, and any edits to
the returned list won't be reflected in the conda packages of this object.
:rtype: iter
"""
conda_dependencies = []
if PACKAGES in self._conda_dependencies:
for ditem in self._conda_dependencies[PACKAGES]:
if PIP not in ditem and not isinstance(ditem, dict):
conda_dependencies.append(ditem)
if PIP in ditem and isinstance(ditem, str):
conda_dependencies.append(ditem)
return iter(conda_dependencies)
def _is_option(self, item, startswith='-'):
"""Check if parameter is option.
:param item:
:type item: str
:param startswith:
:type startswith: char
        :return: Returns True if the item starts with the given prefix
        :rtype: bool
        """
        return item.startswith(startswith)
def _filter_options(self, items, startswith='-', keep=True):
"""Filter options.
:param items:
:type items: builtin.list
:param startswith:
:type startswith: char
:param keep:
:type keep: bool
:return: Returns the filtered options
:rtype: builtin.list
"""
if keep:
return [x for x in items if self._is_option(x, startswith)]
else:
return [x for x in items if not self._is_option(x, startswith)]
@property
def pip_packages(self):
"""Return pip dependencies.
:return: Returns the pip dependencies. Returns a copy of pip packages, and any edits to
the returned list won't be reflected in the pip packages of this object.
:rtype: iter
"""
pip_dependencies = []
if PACKAGES in self._conda_dependencies:
for ditem in self._conda_dependencies[PACKAGES]:
if PIP in ditem and isinstance(ditem, dict):
pip_dependencies = self._filter_options(ditem[PIP], keep=False)
return iter(pip_dependencies)
@property
def pip_options(self):
"""Return pip options.
:return: Returns the pip options. Returns a copy of pip options, and any edits to
the returned list won't be reflected in the pip options of this object.
:rtype: iter
"""
pip_options = []
if PACKAGES in self._conda_dependencies:
for ditem in self._conda_dependencies[PACKAGES]:
if PIP in ditem and isinstance(ditem, dict):
pip_options = self._filter_options(ditem[PIP])
return iter(pip_options)
def get_default_number_of_packages(self):
"""Return the default number of packages.
        :return: The default number of packages.
import shlex
import unittest
import mock
import pytest
import requests
from vt.utils import VittlifyError
from vt.vt import (
Status,
add,
categories,
complete,
display_all_shopping_lists,
display_item,
display_shopping_list,
display_shopping_list_categories,
help,
modify,
move,
run,
show,
term,
)
class TestDisplayShoppingList(unittest.TestCase):
def setUp(self):
self.get_shopping_list_info_patcher = mock.patch('vt.vt.get_shopping_list_info')
self.mock_get_shopping_list_info = self.get_shopping_list_info_patcher.start()
self.get_shopping_list_items_patcher = mock.patch(
'vt.vt.get_shopping_list_items'
)
self.mock_get_shopping_list_items = self.get_shopping_list_items_patcher.start()
self.get_completed_patcher = mock.patch('vt.vt.get_completed')
self.mock_get_completed = self.get_completed_patcher.start()
self.get_all_shopping_list_items_patcher = mock.patch(
'vt.vt.get_all_shopping_list_items'
)
self.mock_get_all_shopping_list_items = (
self.get_all_shopping_list_items_patcher.start()
)
self.format_row_patcher = mock.patch('vt.vt.format_row')
self.mock_format_row = self.format_row_patcher.start()
self.print_table_patcher = mock.patch('vt.vt.print_table')
self.mock_print_table = self.print_table_patcher.start()
test_shopping_list = {'name': 'test_list'}
self.mock_get_shopping_list_info.return_value = test_shopping_list
test_items = [
{'name': 'item1'},
{'name': 'item2'},
{'name': 'item3'},
]
self.mock_get_shopping_list_items.return_value = test_items
self.mock_get_all_shopping_list_items.return_value = test_items
self.mock_get_completed.return_value = test_items
self.mock_format_row.side_effect = [
'formatted_row_1',
'formatted_row_2',
'formatted_row_3',
]
def tearDown(self):
self.get_shopping_list_info_patcher.stop()
self.get_shopping_list_items_patcher.stop()
self.get_completed_patcher.stop()
self.get_all_shopping_list_items_patcher.stop()
self.format_row_patcher.stop()
self.print_table_patcher.stop()
def test_not_completed(self):
guid = 'test_guid'
display_shopping_list(guid=guid, mode=Status.NOT_COMPLETED)
self.mock_get_shopping_list_info.assert_called_once_with(guid)
self.mock_get_shopping_list_items.assert_called_once_with(guid)
self.mock_format_row.assert_has_calls(
[
mock.call(
{'name': 'item1'},
{'name': 'test_list'},
include_category=False,
include_comments=False,
no_wrap=False,
),
mock.call(
{'name': 'item2'},
{'name': 'test_list'},
include_category=False,
include_comments=False,
no_wrap=False,
),
mock.call(
{'name': 'item3'},
{'name': 'test_list'},
include_category=False,
include_comments=False,
no_wrap=False,
),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'],
title='test_list',
quiet=False,
)
def test_all(self):
guid = 'test_guid'
display_shopping_list(guid=guid, mode=Status.ALL)
self.mock_get_shopping_list_info.assert_called_once_with(guid)
self.mock_get_all_shopping_list_items.assert_called_once_with(guid)
self.mock_format_row.assert_has_calls(
[
mock.call(
{'name': 'item1'},
{'name': 'test_list'},
include_category=False,
include_comments=False,
no_wrap=False,
),
mock.call(
{'name': 'item2'},
{'name': 'test_list'},
include_category=False,
include_comments=False,
no_wrap=False,
),
mock.call(
{'name': 'item3'},
{'name': 'test_list'},
include_category=False,
include_comments=False,
no_wrap=False,
),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'],
title='test_list',
quiet=False,
)
def test_completed(self):
guid = 'test_guid'
display_shopping_list(guid=guid, mode=Status.COMPLETED)
self.assertFalse(self.mock_get_shopping_list_info.called)
self.mock_get_completed.assert_called_once_with()
self.mock_format_row.assert_has_calls(
[
mock.call(
{'name': 'item1'},
None,
include_category=False,
include_comments=False,
no_wrap=False,
),
mock.call(
{'name': 'item2'},
None,
include_category=False,
include_comments=False,
no_wrap=False,
),
mock.call(
{'name': 'item3'},
None,
include_category=False,
include_comments=False,
no_wrap=False,
),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'],
title='Recently Completed',
quiet=False,
)
def test_not_completed_extended(self):
guid = 'test_guid'
display_shopping_list(guid=guid, mode=Status.NOT_COMPLETED, extended=True)
self.mock_get_shopping_list_info.assert_called_once_with(guid)
self.mock_get_shopping_list_items.assert_called_once_with(guid)
self.mock_format_row.assert_has_calls(
[
mock.call(
{'name': 'item1'},
{'name': 'test_list'},
include_category=False,
include_comments=True,
no_wrap=False,
),
mock.call(
{'name': 'item2'},
{'name': 'test_list'},
include_category=False,
include_comments=True,
no_wrap=False,
),
mock.call(
{'name': 'item3'},
{'name': 'test_list'},
include_category=False,
include_comments=True,
no_wrap=False,
),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'],
title='test_list',
quiet=False,
)
def test_all_extended(self):
guid = 'test_guid'
display_shopping_list(guid=guid, mode=Status.ALL, extended=True)
self.mock_get_shopping_list_info.assert_called_once_with(guid)
self.mock_get_all_shopping_list_items.assert_called_once_with(guid)
self.mock_format_row.assert_has_calls(
[
mock.call(
{'name': 'item1'},
{'name': 'test_list'},
include_category=False,
include_comments=True,
no_wrap=False,
),
mock.call(
{'name': 'item2'},
{'name': 'test_list'},
include_category=False,
include_comments=True,
no_wrap=False,
),
mock.call(
{'name': 'item3'},
{'name': 'test_list'},
include_category=False,
include_comments=True,
no_wrap=False,
),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'],
title='test_list',
quiet=False,
)
def test_completed_extended(self):
guid = 'test_guid'
display_shopping_list(guid=guid, mode=Status.COMPLETED, extended=True)
self.assertFalse(self.mock_get_shopping_list_info.called)
self.mock_get_completed.assert_called_once_with()
self.mock_format_row.assert_has_calls(
[
mock.call(
{'name': 'item1'},
None,
include_category=False,
include_comments=True,
no_wrap=False,
),
mock.call(
{'name': 'item2'},
None,
include_category=False,
include_comments=True,
no_wrap=False,
),
mock.call(
{'name': 'item3'},
None,
include_category=False,
include_comments=True,
no_wrap=False,
),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'],
title='Recently Completed',
quiet=False,
)
class TestDisplayItem(unittest.TestCase):
def setUp(self):
self.get_item_patcher = mock.patch('vt.vt.get_item')
self.mock_get_item = self.get_item_patcher.start()
self.format_row_patcher = mock.patch('vt.vt.format_row')
self.mock_format_row = self.format_row_patcher.start()
self.print_table_patcher = mock.patch('vt.vt.print_table')
self.mock_print_table = self.print_table_patcher.start()
self.test_guid = 'test_guid'
def tearDown(self):
self.get_item_patcher.stop()
self.format_row_patcher.stop()
self.print_table_patcher.stop()
def test_(self):
display_item(self.test_guid)
self.mock_get_item.assert_called_once_with(self.test_guid)
self.mock_format_row.assert_called_once_with(
self.mock_get_item.return_value, None, include_comments=True, no_wrap=False
)
self.mock_print_table.assert_called_once_with(
[self.mock_format_row.return_value]
)
class TestDisplayAllShoppingLists(unittest.TestCase):
def setUp(self):
self.get_all_shopping_lists_patcher = mock.patch('vt.vt.get_all_shopping_lists')
self.mock_get_all_shopping_lists = self.get_all_shopping_lists_patcher.start()
self.format_row_patcher = mock.patch('vt.vt.format_row')
self.mock_format_row = self.format_row_patcher.start()
self.print_table_patcher = mock.patch('vt.vt.print_table')
self.mock_print_table = self.print_table_patcher.start()
self.mock_get_all_shopping_lists.return_value = [
{'name': 'list1'},
{'name': 'list2'},
{'name': 'list3'},
]
self.mock_format_row.side_effect = [
'formatted_row_1',
'formatted_row_2',
'formatted_row_3',
]
def tearDown(self):
self.get_all_shopping_lists_patcher.stop()
self.format_row_patcher.stop()
def test_(self):
display_all_shopping_lists()
self.mock_get_all_shopping_lists.assert_called_once_with()
self.mock_format_row.assert_has_calls(
[
mock.call({'name': 'list1'}, None, no_wrap=False),
mock.call({'name': 'list2'}, None, no_wrap=False),
mock.call({'name': 'list3'}, None, no_wrap=False),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'], title='All Lists'
)
class TestShowNoDefaultList(unittest.TestCase):
def setUp(self):
self.DEFAULT_LIST_patcher = mock.patch('vt.vt.DEFAULT_LIST', '')
self.DEFAULT_LIST_patcher.start()
self.display_shopping_list_patcher = mock.patch('vt.vt.display_shopping_list')
self.mock_display_shopping_list = self.display_shopping_list_patcher.start()
self.display_all_shopping_lists_patcher = mock.patch(
'vt.vt.display_all_shopping_lists'
)
self.mock_display_all_shopping_lists = (
self.display_all_shopping_lists_patcher.start()
)
self.display_item_patcher = mock.patch('vt.vt.display_item')
self.mock_display_item = self.display_item_patcher.start()
def tearDown(self):
self.DEFAULT_LIST_patcher.stop()
self.display_shopping_list_patcher.stop()
self.display_all_shopping_lists_patcher.stop()
self.display_item_patcher.stop()
def test_list_empty_guid(self):
args = shlex.split("list ''")
self.assertRaises(IndexError, show, args)
def test_list_no_guid(self):
args = shlex.split("list")
self.assertRaises(IndexError, show, args)
def test_list_empty_guid_extended(self):
args = shlex.split("list '' -e")
self.assertRaises(IndexError, show, args)
def test_list_no_guid_extended(self):
args = shlex.split("list -e")
self.assertRaises(IndexError, show, args)
def test_list_no_extended(self):
args = shlex.split("list test_guid")
show(args)
self.mock_display_shopping_list.assert_called_once_with(guid='test_guid')
def test_list_extended(self):
args = shlex.split("list test_guid -e")
show(args)
self.mock_display_shopping_list.assert_called_once_with(
guid='test_guid',
extended=True,
)
def test_lists(self):
args = shlex.split("lists")
show(args)
self.mock_display_all_shopping_lists.assert_called_once_with()
def test_item_no_guid(self):
args = shlex.split("item")
self.assertRaises(IndexError, show, args)
def test_item_empty_guid(self):
args = shlex.split("item ''")
self.assertRaises(IndexError, show, args)
def test_item(self):
args = shlex.split("item test_guid")
show(args)
self.mock_display_item.assert_called_once_with('test_guid')
class TestShowDefaultList:
@pytest.fixture(autouse=True)
def setUp(self, mocker):
self.DEFAULT_LIST_patcher = mock.patch('vt.vt.DEFAULT_LIST', 'default_list')
self.DEFAULT_LIST_patcher.start()
self.parse_options_patcher = mock.patch('vt.vt.parse_options')
self.mock_parse_options = self.parse_options_patcher.start()
self.display_shopping_list_patcher = mock.patch('vt.vt.display_shopping_list')
self.mock_display_shopping_list = self.display_shopping_list_patcher.start()
self.display_all_shopping_lists_patcher = mock.patch(
'vt.vt.display_all_shopping_lists'
)
self.mock_display_all_shopping_lists = (
self.display_all_shopping_lists_patcher.start()
)
self.display_shopping_list_categories_patcher = mock.patch(
'vt.vt.display_shopping_list_categories'
)
self.mock_display_shopping_list_categories = (
self.display_shopping_list_categories_patcher.start()
)
mocker.patch.object(term, 'red', autospec=True)
self.display_item_patcher = mock.patch('vt.vt.display_item')
self.mock_display_item = self.display_item_patcher.start()
self.mock_parse_options.return_value = {}
yield
self.DEFAULT_LIST_patcher.stop()
self.parse_options_patcher.stop()
self.display_shopping_list_patcher.stop()
self.display_all_shopping_lists_patcher.stop()
self.display_item_patcher.stop()
self.display_shopping_list_categories_patcher.stop()
def test_list_empty_guid(self):
args = shlex.split("list ''")
show(args)
self.mock_display_shopping_list.assert_called_once_with(guid='default_list')
def test_list_no_guid(self):
args = shlex.split("list")
show(args)
self.mock_display_shopping_list.assert_called_once_with(guid='default_list')
def test_list_empty_guid_extended(self):
self.mock_parse_options.return_value = {'extended': True}
args = shlex.split("list '' -e")
show(args)
self.mock_display_shopping_list.assert_called_once_with(
guid='default_list', extended=True
)
def test_list_no_guid_extended(self):
self.mock_parse_options.return_value = {'extended': True}
args = shlex.split("list -e")
show(args)
self.mock_display_shopping_list.assert_called_once_with(
guid='default_list', extended=True
)
def test_list_no_extended(self):
args = shlex.split("list test_guid")
show(args)
self.mock_display_shopping_list.assert_called_once_with(guid='test_guid')
def test_list_extended(self):
self.mock_parse_options.return_value = {'extended': True}
args = shlex.split("list test_guid -e")
show(args)
self.mock_display_shopping_list.assert_called_once_with(
guid='test_guid',
extended=True,
)
def test_lists(self):
args = shlex.split("lists")
show(args)
self.mock_display_all_shopping_lists.assert_called_once_with()
def test_item_no_guid(self):
args = shlex.split("item")
with pytest.raises(IndexError):
show(args)
def test_item_empty_guid(self):
args = shlex.split("item ''")
with pytest.raises(IndexError):
show(args)
def test_item(self):
args = shlex.split("item test_guid")
show(args)
self.mock_display_item.assert_called_once_with('test_guid')
def test_display_list_categories(self):
self.mock_parse_options.return_value = {
'categories': [{'name': 'type A'}, {'name': 'type B'}]
}
args = shlex.split("test_guid")
categories(args)
self.mock_display_shopping_list_categories.assert_called_once_with('test_guid')
def test_display_list_categories_raises(self):
self.mock_parse_options.return_value = {
'categories': [{'name': 'type A'}, {'name': 'type B'}]
}
self.mock_display_shopping_list_categories.side_effect = VittlifyError(
'Got an error'
)
args = shlex.split("test_guid")
categories(args)
term.red.assert_called_once_with('Got an error')
self.mock_display_shopping_list_categories.assert_called_once_with('test_guid')
def test_display_shopping_list_raises(self):
self.mock_display_shopping_list.side_effect = VittlifyError('Got an error')
args = shlex.split("list test_guid")
show(args)
term.red.assert_called_once_with('Got an error')
self.mock_display_shopping_list.assert_called_once_with(guid='test_guid')
def test_display_item_raises(self):
self.mock_display_item.side_effect = VittlifyError('Got an error')
args = shlex.split("show test_guid")
show(args)
term.red.assert_called_once_with('Got an error')
def test_display_all_shopping_lists_raises(self):
self.mock_display_all_shopping_lists.side_effect = VittlifyError('Got an error')
args = shlex.split("lists")
show(args)
self.mock_display_all_shopping_lists.assert_called_once_with()
term.red.assert_called_once_with('Got an error')
class TestComplete:
@pytest.fixture(autouse=True)
def setUp(self, mocker):
self.complete_item_patcher = mock.patch('vt.vt.complete_item')
self.mock_complete_item = self.complete_item_patcher.start()
self.mock_print = mocker.patch('builtins.print')
self.display_shopping_list_patcher = mock.patch('vt.vt.display_shopping_list')
self.mock_display_shopping_list = self.display_shopping_list_patcher.start()
self.apply_strikethrough_patcher = mock.patch('vt.vt.apply_strikethrough')
self.mock_apply_strikethrough = self.apply_strikethrough_patcher.start()
self.mock_complete_item.return_value = {'name': 'test_name'}
self.mock_apply_strikethrough.return_value = 'struck_through'
yield
self.complete_item_patcher.stop()
        self.apply_strikethrough_patcher.stop()
        self.display_shopping_list_patcher.stop()
def test_complete(self):
args = shlex.split("test_guid")
complete(args)
self.mock_complete_item.assert_called_once_with('test_guid', uncomplete=False)
self.mock_apply_strikethrough.assert_called_once_with('test_name')
self.mock_print.assert_called_once_with(
f'Marked {term.magenta}struck_through{term.normal} as done.'
)
def test_uncomplete(self):
args = shlex.split("test_guid")
complete(args, uncomplete=True)
self.mock_complete_item.assert_called_once_with('test_guid', uncomplete=True)
self.mock_print.assert_called_once_with(
f'Marked {term.magenta}test_name{term.normal} undone.'
)
def test_done_extended(self):
args = shlex.split("-e")
complete(args)
self.mock_display_shopping_list.assert_called_once_with(
extended=True, mode=Status.COMPLETED
)
def test_completed_no_extended(self):
args = shlex.split("")
complete(args)
self.mock_display_shopping_list.assert_called_once_with(mode=Status.COMPLETED)
def test_completed_extended(self):
args = shlex.split("--extended")
complete(args)
self.mock_display_shopping_list.assert_called_once_with(
extended=True, mode=Status.COMPLETED
)
class TestModify(unittest.TestCase):
def setUp(self):
self.modify_item_patcher = mock.patch('vt.vt.modify_item')
self.mock_modify_item = self.modify_item_patcher.start()
self.display_item_patcher = mock.patch('vt.vt.display_item')
self.mock_display_item = self.display_item_patcher.start()
def tearDown(self):
self.modify_item_patcher.stop()
self.display_item_patcher.stop()
def test_no_options(self):
args = shlex.split("test_guid this is a comment")
modify(args)
self.mock_modify_item.assert_called_once_with('test_guid', 'this is a comment')
self.mock_display_item.assert_called_once_with('test_guid')
def test_with_short_options(self):
args = shlex.split("test_guid -a this is a comment")
modify(args)
self.mock_modify_item.assert_called_once_with(
'test_guid', 'this is a comment', append=True
)
self.mock_display_item.assert_called_once_with('test_guid')
def test_with_options(self):
args = shlex.split("test_guid --append this is a comment")
modify(args)
self.mock_modify_item.assert_called_once_with(
'test_guid', 'this is a comment', append=True
)
self.mock_display_item.assert_called_once_with('test_guid')
class TestAddDefaultList(unittest.TestCase):
def setUp(self):
self.DEFAULT_LIST_patcher = mock.patch('vt.vt.DEFAULT_LIST', 'default_list')
self.DEFAULT_LIST_patcher.start()
self.add_item_patcher = mock.patch('vt.vt.add_item')
self.mock_add_item = self.add_item_patcher.start()
self.format_row_patcher = mock.patch('vt.vt.format_row')
self.mock_format_row = self.format_row_patcher.start()
self.print_table_patcher = mock.patch('vt.vt.print_table')
self.mock_print_table = self.print_table_patcher.start()
def tearDown(self):
self.add_item_patcher.stop()
self.DEFAULT_LIST_patcher.stop()
self.format_row_patcher.stop()
self.print_table_patcher.stop()
def test_no_guid(self):
args = shlex.split("'this is a new item'")
add(args)
self.mock_add_item.assert_called_once_with('default_list', 'this is a new item')
self.mock_format_row.assert_called_once_with(
self.mock_add_item.return_value, no_wrap=False
)
self.mock_print_table.assert_called_once_with(
[self.mock_format_row.return_value]
)
def test_with_guid(self):
args = shlex.split("test_guid 'this is a new item'")
add(args)
self.mock_add_item.assert_called_once_with('test_guid', 'this is a new item')
self.mock_format_row.assert_called_once_with(
self.mock_add_item.return_value, no_wrap=False
)
self.mock_print_table.assert_called_once_with(
[self.mock_format_row.return_value]
)
class TestAddNoDefaultList(unittest.TestCase):
def setUp(self):
self.DEFAULT_LIST_patcher = mock.patch('vt.vt.DEFAULT_LIST', None)
self.DEFAULT_LIST_patcher.start()
self.add_item_patcher = mock.patch('vt.vt.add_item')
self.mock_add_item = self.add_item_patcher.start()
self.format_row_patcher = mock.patch('vt.vt.format_row')
self.mock_format_row = self.format_row_patcher.start()
self.print_table_patcher = mock.patch('vt.vt.print_table')
self.mock_print_table = self.print_table_patcher.start()
def tearDown(self):
self.add_item_patcher.stop()
self.DEFAULT_LIST_patcher.stop()
self.format_row_patcher.stop()
self.print_table_patcher.stop()
def test_no_guid(self):
args = shlex.split("'this is a new item'")
self.assertRaises(IndexError, add, args)
def test_with_guid(self):
args = shlex.split("test_guid 'this is a new item'")
add(args)
self.mock_add_item.assert_called_once_with('test_guid', 'this is a new item')
self.mock_format_row.assert_called_once_with(
self.mock_add_item.return_value, no_wrap=False
)
self.mock_print_table.assert_called_once_with(
[self.mock_format_row.return_value]
)
class TestMove:
@pytest.fixture(autouse=True)
def setUp(self, mocker):
self.move_item_patcher = mock.patch('vt.vt.move_item')
self.mock_move_item = self.move_item_patcher.start()
self.mock_print = mocker.patch('builtins.print')
yield
self.move_item_patcher.stop()
def test_(self):
args = shlex.split('test_guid to_list_guid')
move(args)
self.mock_move_item.assert_called_once_with('test_guid', 'to_list_guid')
self.mock_print.assert_called_once_with(
f'Moved item {term.blue}test_guid{term.normal} to list {term.blue}to_list_guid{term.normal}'
)
class TestRun:
@pytest.fixture(autouse=True)
def setUp(self, mocker):
self.show_patcher = mock.patch('vt.vt.show')
self.mock_show = self.show_patcher.start()
self.complete_patcher = mock.patch('vt.vt.complete')
self.mock_complete = self.complete_patcher.start()
self.modify_patcher = mock.patch('vt.vt.modify')
self.mock_modify = self.modify_patcher.start()
self.add_patcher = mock.patch('vt.vt.add')
self.mock_add = self.add_patcher.start()
self.move_patcher = mock.patch('vt.vt.move')
self.mock_move = self.move_patcher.start()
mocker.patch.object(term, 'red', autospec=True)
self.SHOW_TRACEBACK_patcher = mock.patch('vt.vt.SHOW_TRACEBACK', False)
self.SHOW_TRACEBACK_patcher.start()
self.PROXY_patcher = mock.patch('vt.vt.PROXY', False)
self.PROXY_patcher.start()
self.VITTLIFY_URL_patcher = | |
        P, D = A.diagonalize()
return P, D
def jordan(self, Am, calc=True):
A = sp.Matrix(Am)
if calc is True:
Pa, Ja = A.jordan_form(calc_transform=calc)
return Pa, Ja
if calc is False:
Jb = A.jordan_form(calc_transform=calc)
return Jb
def char(self, Am):
A = sp.Matrix(Am)
M, N = np.array(Am).shape
λ = sp.Symbol('λ')
II = sp.eye(M)
Ad = A - II*λ
Deta = Ad.det()
return Deta
def sroots(self, Am, tol=None, nn=None, char=None):
#returns sympy format, python format
λ = sp.Symbol('λ')
        if tol is None:
            tol = 10**-15
        if nn is None:
            nv = 15
        else:
            nv = int(nn)
        if char is None:
            A = sp.Matrix(Am)
            M, N = np.array(Am).shape
            II = sp.eye(M)
            Ad = A - II*λ
            Deta = Ad.det()
        else:
            Deta = char
Detsimp = sp.nfloat(sp.nsimplify(Deta, tolerance=tol, full=True), n=nv)
rlist = list(sp.solve(Detsimp, λ, **{'set': True ,'particular': True}))
rootsd = [sp.nsimplify(i, tolerance=tol, full=True) for i in rlist]
sympl = []
numpr = []
numpi = []
for i, j in enumerate(rootsd):
sympl.append(sp.simplify(sp.nfloat(rootsd[i], n=nv), rational=False))
vals = j.as_real_imag()
vali = sp.nfloat(vals[0], n=nv)
valj = sp.nfloat(vals[1], n=nv)
numpr.append(vali)
numpi.append(valj)
reals = []
iss = []
simps = []
for i, j in zip(numpr, numpi):
reale = i
compe = j
reals.append(reale)
if compe != 0.0:
iss.append(compe)
simps.append(complex(i,j))
else:
simps.append(reale)
return rlist, simps
def alpha(self, eigl):
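        # Splits a list of eigenvalues into purely real eigenvalues (lambds) and
        # the real parts (alps) / imaginary parts (betas) of the complex ones,
        # keeping only one representative per conjugate pair.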
eigs = list(eigl)
lambds = []
alps = []
betas = []
for i in eigs:
rr = i.real
ii = i.imag
ii2 = -ii
if rr not in alps and ii != 0.0:
alps.append(rr)
if ii not in betas and ii2 not in betas and ii != 0.0:
betas.append(ii)
if ii == 0.0:
lambds.append(rr)
return lambds, alps, betas
def block(self, ls, als, bs):
matrices = []
for i in range(len(als)):
matrixi = sp.Matrix([[als[i], -bs[i]], [bs[i], als[i]]])
matrices.append(matrixi)
B = sp.BlockDiagMatrix(sp.Matrix([ls]),*matrices)
return B
def eigs(self, Mm):
rs = []
cs = []
ff = []
Eigs = list(sc.linalg.eigvals(np.array(Mm, dtype=float)))
for ei in Eigs:
ri = ei.real
ci = ei.imag
if ci == 0.0 or ci == 0:
ff.append(ri)
elif len(rs) > 0:
ril = rs[-1]
if ril != ri:
rs.append(ri)
cs.append(ci)
else:
rs.append(ri)
cs.append(-1.0*ci)
else:
rs.append(ri)
cs.append(ci)
if len(rs) > 0:
for ris, cis in zip(rs, cs):
ind = rs.index(ris)
r = ris
iz = cis
z = complex(r, iz)
zc = z.conjugate()
ff.append(z)
return ff
def Tmat(self, Am):
A = sp.Matrix(Am)
eis = self.eigs(Am)
M, N = A.shape
Xi = sp.symarray('x',M)
listd = [sp.symbols('x_{}'.format(i)) for i in range(M)]
llist = [sp.symbols('x_{}'.format(i)) for i in range(M-1)]
T = []
realss = []
comps = []
imagine = bool(False)
count = 0
for i in eis:
II = sp.eye(M)
Ad = A - i*II
AA = sp.matrix2numpy(Ad*sp.Matrix(Xi))
AAf = self.flatten(AA)
ss = sp.nonlinsolve(AAf, llist)
Xvec = list(ss.args[0].subs({listd[-1] : 1.00})) # One is used by default but may be wrong in certain situations
for iss in Xvec:
indexx = Xvec.index(iss)
Xvec[indexx] = sp.simplify(iss)
XXvec = []
for ii,jb in enumerate(Xvec):
vall = jb
Xvec[ii] = vall
vali = sp.re(vall)
valj = sp.im(vall)
realss.append(vali)
comps.append(valj)
if valj != 0.0:
imagine = bool(True)
if imagine == True:
count += 1
realss.insert(len(realss), 1)
comps.insert(len(comps), 0)
if count % 2 == 0 and imagine == False:
T.append(realss[:])
realss.clear()
comps.clear()
elif count % 2 != 0 and imagine == True:
T.append(realss[:])
realss.clear()
comps.clear()
elif count % 2 == 0 and imagine == True:
T.append(comps[:])
realss.clear()
comps.clear()
Xvec.clear()
XXvec.clear()
T_matrix = self.Tt(T)
for i in range(len(T_matrix)):
for j in range(len(T_matrix[0])):
ijval = float("{:.25f}".format(T_matrix[i][j]))
T_matrix[i][j] = ijval
TI_matrix = self.inv(T_matrix)
return T_matrix , TI_matrix
def zeros_matrix(self, rows, cols):
"""
Creates a matrix filled with zeros.
:param rows: the number of rows the matrix should have
:param cols: the number of columns the matrix should have
:return: list of lists that form the matrix
"""
M = []
while len(M) < rows:
M.append([])
while len(M[-1]) < cols:
M[-1].append(0.0)
return M
def identity_matrix(self, n):
"""
Creates and returns an identity matrix.
:param n: the square size of the matrix
:return: a square identity matrix
"""
IdM = self.zeros_matrix(n, n)
for i in range(n):
IdM[i][i] = 1.0
return IdM
def copy_matrix(self, M):
"""
Creates and returns a copy of a matrix.
:param M: The matrix to be copied
:return: A copy of the given matrix
"""
# Section 1: Get matrix dimensions
rows = len(M)
cols = len(M[0])
# Section 2: Create a new matrix of zeros
MC = self.zeros_matrix(rows, cols)
# Section 3: Copy values of M into the copy
for i in range(rows):
for j in range(cols):
MC[i][j] = M[i][j]
return MC
def check_matrix_equality(self, Am, Bm, tol=None):
"""
Checks the equality of two matrices.
:param A: The first matrix
:param B: The second matrix
:param tol: The decimal place tolerance of the check
:return: The boolean result of the equality check
"""
# Section 1: First ensure matrices have same dimensions
if len(Am) != len(Bm) or len(Am[0]) != len(Bm[0]):
return False
# Section 2: Check element by element equality
# use tolerance if given
for i in range(len(Am)):
for j in range(len(Am[0])):
if tol is None:
if Am[i][j] != Bm[i][j]:
return False
else:
if round(Am[i][j], tol) != round(Bm[i][j], tol):
return False
return True
def check_squareness(self, Am):
"""
Makes sure that a matrix is square
:param A: The matrix to be checked.
"""
if len(Am) != len(Am[0]):
            raise ArithmeticError("Matrix must be square to invert.")
def matrix_multiply(self, Am, Bm):
"""
Returns the product of the matrix A * B
:param A: The first matrix - ORDER MATTERS!
:param B: The second matrix
:return: The product of the two matrices
"""
# Section 1: Ensure A & B dimensions are correct for multiplication
rowsA = len(Am)
colsA = len(Am[0])
rowsB = len(Bm)
colsB = len(Bm[0])
if colsA != rowsB:
raise ArithmeticError(
'Number of A columns must equal number of B rows.')
# Section 2: Store matrix multiplication in a new matrix
C = self.zeros_matrix(rowsA, colsB)
for i in range(rowsA):
for j in range(colsB):
total = 0
for ii in range(colsA):
total += Am[i][ii] * Bm[ii][j]
C[i][j] = total
return C
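    # Illustrative sketch (example values assumed, not used elsewhere in this class):
    # from another method one could combine the helpers above like so:
    #
    #     A = [[1.0, 2.0], [3.0, 4.0]]
    #     I = self.identity_matrix(2)
    #     C = self.matrix_multiply(A, I)
    #     self.check_matrix_equality(A, C)  # True, since A * I == A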
def detr(self, Am, total=0):
"""
Find determinant of a square matrix using full recursion
:param A: the matrix to find the determinant for
:param total=0: safely establish a total at each recursion level
:returns: the running total for the levels of recursion
"""
# Section 1: store indices in list for flexible row referencing
indices = list(range(len(Am)))
# Section 2: when at 2x2 submatrices recursive calls end
if len(Am) == 2 and len(Am[0]) == 2:
val = Am[0][0] * Am[1][1] - Am[1][0] * Am[0][1]
return val
# Section 3: define submatrix for focus column and call this function
for fc in indices: # for each focus column, find the submatrix ...
As = self.copy_matrix(Am) # make a copy, and ...
As = As[1:] # ... remove the first row
height = len(As)
for i in range(height): # for each remaining row of submatrix ...
                As[i] = As[i][0:fc] + As[i][fc+1:] # remove the focus column elements
sign = (-1) ** (fc % 2) # alternate signs for submatrix multiplier
sub_det = self.detr(As) # pass submatrix recursively
total += sign * Am[0][fc] * sub_det # total all returns from recursion
return total
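    # Quick sanity check for detr (example values, for illustration only):
    # the 2x2 base case gives det([[1, 2], [3, 4]]) = 1*4 - 3*2 = -2, and a 3x3
    # call recurses over the first row, e.g. det of the 3x3 identity evaluates to 1.0.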
def detf(self, Am):
# Section 1: Establish n parameter and copy A
n = len(Am)
AM = self.copy_matrix(Am)
# Section 2: Row ops on A to get in upper triangle form
for fd in range(n): # A) fd stands for focus diagonal
            for i in range(fd+1,n): # B) only use rows below fd row
                if AM[fd][fd] == 0: # C) if diagonal is zero, substitute a tiny value to avoid dividing by zero
                    AM[fd][fd] = 1.0e-18
                crScaler = AM[i][fd] / AM[fd][fd] # D) scaler for the current row
                for j in range(n): # E) subtract the scaled focus-diagonal row from the current row
                    AM[i][j] = AM[i][j] - crScaler * AM[fd][j]
        product = 1.0 # Section 3: A is now upper triangular, so multiply the diagonal entries
        for i in range(n):
            product *= AM[i][i]
        return product
import numpy
import SLIX
from SLIX.CPU._toolbox import _direction, _prominence, _peakwidth, \
_peakdistance, _centroid, _centroid_correction_bases, _peaks
__all__ = ['TARGET_PROMINENCE', 'peaks',
'peak_width', 'peak_prominence',
'peak_distance', 'mean_peak_distance',
'background_mask', 'mean_peak_width',
'direction', 'num_peaks', 'mean_peak_prominence',
'unit_vectors', 'centroid_correction', 'normalize']
TARGET_PROMINENCE = 0.08
def background_mask(image):
"""
Creates a background mask by setting all image pixels with low scattering
signals to zero. As all background pixels are near zero for all images in
the SLI image stack, this method should remove most of the background
allowing for better approximations using the available features.
It is advised to use this function.
Args:
image: Complete SLI measurement image stack as a 2D/3D Numpy array
Returns:
numpy.array: 1D/2D-image which masks the background as True and
foreground as False
"""
avg_image = numpy.average(image, axis=-1)
# Set histogram to a range of 0 to 1 ignoring any outliers.
hist_avg_image = avg_image / numpy.percentile(avg_image, 99)
# Generate histogram in range of 0 to 1 to ignore outliers again. We search for values at the beginning anyway.
avg_hist, avg_bins = numpy.histogram(hist_avg_image, bins=256, range=(0, 1))
# Use SLIX to search for significant peaks in the histogram
avg_hist = avg_hist[numpy.newaxis, numpy.newaxis, ...]
peaks = SLIX.toolbox.significant_peaks(image=avg_hist).flatten()
# Reverse the histogram to search for minimal values with SLIX (again)
avg_hist = -avg_hist
reversed_peaks = SLIX.toolbox.significant_peaks(image=avg_hist).flatten()
# We can now calculate the index of our background threshold using the reversed_peaks
index = numpy.argmax(peaks) + numpy.argmax(reversed_peaks[numpy.argmax(peaks):])
# Reverse from 0 to 1 to original image scale and calculate the threshold position
threshold = avg_bins[index] * numpy.percentile(avg_image, 99)
# Return a mask with the calculated background image
return avg_image < threshold
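# Example sketch (stack shape and values are assumptions, not SLIX test data):
#
#     import numpy
#     stack = numpy.random.rand(50, 50, 24) * 200   # 50x50 pixels, 24 measurement angles
#     mask = background_mask(stack)                 # True where a pixel is background
#     foreground_profiles = stack[~mask]            # (N, 24) array of tissue line profiles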
def peaks(image):
"""
Detect all peaks from a full SLI measurement. Peaks will not be filtered
in any way. To detect only significant peaks, filter the peaks by using
the prominence as a threshold.
Args:
image: Complete SLI measurement image stack as a 2D/3D Numpy array
Returns:
        2D/3D boolean image masking the peaks with `True`
"""
image = numpy.array(image, dtype=numpy.float32)
reshape = False
if len(image.shape) == 3:
reshape = True
[image_x, image_y, image_z] = image.shape
image = image.reshape(image_x * image_y, image_z)
resulting_image = _peaks(image)
if reshape:
image = image.reshape(image_x, image_y, image_z)
resulting_image = resulting_image.reshape(image_x, image_y, image_z)
return resulting_image.astype('bool')
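# Minimal usage sketch (synthetic line profile, assumed for illustration):
#
#     import numpy
#     profile = numpy.cos(numpy.linspace(0, 4 * numpy.pi, 24))[numpy.newaxis, numpy.newaxis, :]
#     peak_mask = peaks(profile)   # boolean array with True at the local maxima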
def num_peaks(image=None, peak_image=None):
"""
Calculate the number of peaks from each line profile in an SLI image series
by detecting all peaks and applying thresholds to remove unwanted peaks.
Args:
image: Full SLI measurement (series of images) which is prepared for the
pipeline using the SLIX toolbox methods.
peak_image: Boolean NumPy array specifying the peak positions in the full SLI stack
Returns:
Array where each entry corresponds to the number of detected peaks within
the first dimension of the SLI image series.
"""
if peak_image is None and image is not None:
peak_image = peaks(image)
elif peak_image is not None:
peak_image = numpy.array(peak_image)
else:
raise ValueError('Either image or peak_image has to be defined.')
return numpy.count_nonzero(peak_image, axis=-1).astype(numpy.uint16)
def normalize(image, kind_of_normalization=0):
"""
Normalize given line profile by using a normalization technique based on
the kind_of_normalization parameter.
0 : Scale line profile to be between 0 and 1
1 : Divide line profile through its mean value
Arguments:
image: Full SLI measurement (series of images) which is
prepared for the pipeline using the SLIX toolbox methods.
kind_of_normalization: Normalization technique which will be used for
the calculation
Returns:
numpy.array -- Image where each pixel is normalized by the last axis
of the image
"""
image = numpy.array(image, dtype=numpy.float32)
if kind_of_normalization == 0:
image = (image - image.min(axis=-1)[..., None]) \
/ numpy.maximum(1e-15, image.max(axis=-1)[..., None] -
image.min(axis=-1)[..., None])
else:
image = image / \
numpy.maximum(1e-15, numpy.mean(image, axis=-1)[..., None])
return image
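# Sketch of the two normalization modes (example values assumed):
#
#     import numpy
#     profile = numpy.array([[[2.0, 4.0, 6.0]]])
#     normalize(profile, kind_of_normalization=0)   # -> [[[0.0, 0.5, 1.0]]]
#     normalize(profile, kind_of_normalization=1)   # -> [[[0.5, 1.0, 1.5]]], divided by the mean (4.0)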
def peak_prominence(image, peak_image=None, kind_of_normalization=0):
"""
Calculate the peak prominence of all given peak positions within a line
profile. The line profile will be normalized by dividing the line profile
through its mean value. Therefore, values above 1 are possible.
Args:
image: Original line profile used to detect all peaks. This array will be
further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
kind_of_normalization: Normalize given line profile by using a
normalization technique based on the kind_of_normalization parameter.
0 : Scale line profile to be between 0 and 1
1 : Divide line profile through its mean value
Returns:
        NumPy array containing the prominence of each detected peak at its
        position within the line profile.
"""
image = numpy.array(image, dtype=numpy.float32)
if peak_image is None:
peak_image = peaks(image).astype('uint8')
else:
peak_image = numpy.array(peak_image).astype('uint8')
image = normalize(image, kind_of_normalization)
[image_x, image_y, image_z] = image.shape
image = image.reshape(image_x * image_y, image_z)
peak_image = peak_image.reshape(image_x * image_y, image_z).astype('uint8')
result_img = _prominence(image, peak_image)
result_img = result_img.reshape((image_x, image_y, image_z))
return result_img
def mean_peak_prominence(image, peak_image=None, kind_of_normalization=0):
"""
Calculate the mean peak prominence of all given peak positions within a
line profile. The line profile will be normalized by dividing the line
profile through its mean value. Therefore, values above 1 are possible.
Args:
image: Original line profile used to detect all peaks. This array will be
further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
kind_of_normalization: Normalize given line profile by using a
normalization technique based on the kind_of_normalization parameter.
0 : Scale line profile to be between 0 and 1
1 : Divide line profile through its mean value
Returns:
        Floating point value containing the mean peak prominence of the line
        profile.
"""
if peak_image is not None:
peak_image = numpy.array(peak_image).astype('uint8')
else:
peak_image = peaks(image).astype('uint8')
result_img = peak_prominence(image, peak_image, kind_of_normalization)
result_img = numpy.sum(result_img, axis=-1) / \
numpy.maximum(1, numpy.count_nonzero(peak_image, axis=-1))
return result_img.astype('float32')
def peak_width(image, peak_image=None, target_height=0.5):
"""
Calculate the peak width of all given peak positions within a line profile.
Args:
image: Original line profile used to detect all peaks. This array will be
further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
target_height: Relative peak height in relation to the prominence of the
given peak.
Returns:
NumPy array where each entry corresponds to the peak width of the line
        profile. The values are in degrees.
"""
image = numpy.array(image, dtype='float32')
if peak_image is not None:
peak_image = numpy.array(peak_image).astype('uint8')
else:
peak_image = peaks(image).astype('uint8')
[image_x, image_y, image_z] = image.shape
image = image.reshape(image_x * image_y, image_z)
peak_image = peak_image.reshape(image_x * image_y, image_z).astype('uint8')
prominence = _prominence(image, peak_image)
result_image = _peakwidth(image, peak_image, prominence, target_height)
result_image = result_image.reshape((image_x, image_y, image_z))
result_image = result_image * 360.0 / image_z
return result_image
def mean_peak_width(image, peak_image=None, target_height=0.5):
"""
Calculate the mean peak width of all given peak positions within a line
profile.
Args:
image: Original line profile used to detect all peaks. This array will be
further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
target_height: Relative peak height in relation to the prominence of the
given peak.
Returns:
NumPy array where each entry corresponds to the mean peak width of the
        line profile. The values are in degrees.
"""
if peak_image is not None:
peak_image = numpy.array(peak_image).astype('uint8')
else:
peak_image = peaks(image).astype('uint8')
result_img = peak_width(image, peak_image, target_height)
result_img = numpy.sum(result_img, axis=-1) / \
numpy.maximum(1, numpy.count_nonzero(peak_image, axis=-1))
return result_img
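# Usage sketch for the width helpers above (the variable name is an assumption):
#
#     widths = peak_width(measurement_stack)             # per-peak widths in degrees
#     mean_widths = mean_peak_width(measurement_stack)   # one mean width per image pixel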
def peak_distance(peak_image, centroids):
"""
Calculate the mean peak distance in degrees between two corresponding peaks
for each line profile in an SLI image series.
Args:
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
centroids: Use centroid calculation to better determine the peak position
regardless of the number of
measurements / illumination angles used.
Returns:
NumPy array of floating point values containing the peak distance of the
line profiles in degrees in their respective peak position. The first peak
of each peak pair will show the distance between peak_1 and peak_2 while
the second peak | |
"""
__property2_complete_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: levi
Modified: Wed May 6 15:39:12 2015
________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from MT_pre__EClass import *
from MT_pre__EStructuralFeature import *
from MT_pre__Attribute import *
from MT_pre__Equation import *
from MT_pre__directLink_T import *
from MT_pre__directLink_S import *
from MT_pre__trace_link import *
from MT_pre__hasAttr_S import *
from MT_pre__hasAttr_T import *
from MT_pre__leftExpr import *
from MT_pre__rightExpr import *
from LHS import *
from graph_MT_pre__Equation import *
from graph_LHS import *
from graph_MT_pre__trace_link import *
from graph_MT_pre__hasAttr_T import *
from graph_MT_pre__hasAttr_S import *
from graph_MT_pre__EStructuralFeature import *
from graph_MT_pre__directLink_S import *
from graph_MT_pre__directLink_T import *
from graph_MT_pre__EClass import *
from graph_MT_pre__rightExpr import *
from graph_MT_pre__Attribute import *
from graph_MT_pre__leftExpr import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def property2_complete_MDL(self, rootNode, MT_pre__ECoreMMRootNode=None, MoTifRuleRootNode=None):
# --- Generating attributes code for ASG MT_pre__ECoreMM ---
if( MT_pre__ECoreMMRootNode ):
# author
MT_pre__ECoreMMRootNode.author.setValue('Annonymous')
# description
MT_pre__ECoreMMRootNode.description.setValue('\n')
MT_pre__ECoreMMRootNode.description.setHeight(15)
# name
MT_pre__ECoreMMRootNode.name.setValue('')
MT_pre__ECoreMMRootNode.name.setNone()
# --- ASG attributes over ---
# --- Generating attributes code for ASG MoTifRule ---
if( MoTifRuleRootNode ):
# author
MoTifRuleRootNode.author.setValue('Annonymous')
# description
MoTifRuleRootNode.description.setValue('\n')
MoTifRuleRootNode.description.setHeight(15)
# name
MoTifRuleRootNode.name.setValue('property2_complete')
# --- ASG attributes over ---
self.obj38=MT_pre__EClass(self)
self.obj38.isGraphObjectVisual = True
if(hasattr(self.obj38, '_setHierarchicalLink')):
self.obj38._setHierarchicalLink(False)
# MT_label__
self.obj38.MT_label__.setValue('1')
# MT_pivotOut__
self.obj38.MT_pivotOut__.setValue('')
self.obj38.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj38.MT_subtypeMatching__.setValue(('True', 0))
self.obj38.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj38.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj38.MT_pre__classtype.setHeight(15)
# MT_pre__cardinality
self.obj38.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj38.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj38.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj38.MT_pre__name.setHeight(15)
# MT_pivotIn__
self.obj38.MT_pivotIn__.setValue('')
self.obj38.MT_pivotIn__.setNone()
self.obj38.graphClass_= graph_MT_pre__EClass
if self.genGraphics:
new_obj = graph_MT_pre__EClass(340.0,180.0,self.obj38)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__EClass", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj38.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj38)
self.globalAndLocalPostcondition(self.obj38, rootNode)
self.obj38.postAction( rootNode.CREATE )
self.obj39=MT_pre__EClass(self)
self.obj39.isGraphObjectVisual = True
if(hasattr(self.obj39, '_setHierarchicalLink')):
self.obj39._setHierarchicalLink(False)
# MT_label__
self.obj39.MT_label__.setValue('2')
# MT_pivotOut__
self.obj39.MT_pivotOut__.setValue('')
self.obj39.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj39.MT_subtypeMatching__.setValue(('True', 0))
self.obj39.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj39.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj39.MT_pre__classtype.setHeight(15)
# MT_pre__cardinality
self.obj39.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj39.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj39.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj39.MT_pre__name.setHeight(15)
# MT_pivotIn__
self.obj39.MT_pivotIn__.setValue('')
self.obj39.MT_pivotIn__.setNone()
self.obj39.graphClass_= graph_MT_pre__EClass
if self.genGraphics:
new_obj = graph_MT_pre__EClass(340.0,320.0,self.obj39)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__EClass", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj39.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj39)
self.globalAndLocalPostcondition(self.obj39, rootNode)
self.obj39.postAction( rootNode.CREATE )
self.obj40=MT_pre__EStructuralFeature(self)
self.obj40.isGraphObjectVisual = True
if(hasattr(self.obj40, '_setHierarchicalLink')):
self.obj40._setHierarchicalLink(False)
# MT_label__
self.obj40.MT_label__.setValue('3')
# MT_pivotOut__
self.obj40.MT_pivotOut__.setValue('')
self.obj40.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj40.MT_subtypeMatching__.setValue(('True', 1))
self.obj40.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj40.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj40.MT_pre__classtype.setHeight(15)
# MT_pre__cardinality
self.obj40.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj40.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj40.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj40.MT_pre__name.setHeight(15)
# MT_pivotIn__
self.obj40.MT_pivotIn__.setValue('')
self.obj40.MT_pivotIn__.setNone()
self.obj40.graphClass_= graph_MT_pre__EStructuralFeature
if self.genGraphics:
new_obj = graph_MT_pre__EStructuralFeature(500.0,180.0,self.obj40)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__EStructuralFeature", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj40.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj40)
self.globalAndLocalPostcondition(self.obj40, rootNode)
self.obj40.postAction( rootNode.CREATE )
self.obj41=MT_pre__EStructuralFeature(self)
self.obj41.isGraphObjectVisual = True
if(hasattr(self.obj41, '_setHierarchicalLink')):
self.obj41._setHierarchicalLink(False)
# MT_label__
self.obj41.MT_label__.setValue('4')
# MT_pivotOut__
self.obj41.MT_pivotOut__.setValue('')
self.obj41.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj41.MT_subtypeMatching__.setValue(('True', 1))
self.obj41.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj41.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj41.MT_pre__classtype.setHeight(15)
# MT_pre__cardinality
self.obj41.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj41.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj41.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj41.MT_pre__name.setHeight(15)
# MT_pivotIn__
self.obj41.MT_pivotIn__.setValue('')
self.obj41.MT_pivotIn__.setNone()
self.obj41.graphClass_= graph_MT_pre__EStructuralFeature
if self.genGraphics:
new_obj = graph_MT_pre__EStructuralFeature(500.0,320.0,self.obj41)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__EStructuralFeature", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj41.graphObject_ = new_obj
    # Add node to the root: rootNode
9 floats (so3 element)`)
t (:obj:`list of 3 floats`)
"""
return _robotsim.RobotModelLink_setTransform(self, R, t)
def getVelocity(self):
"""
Returns the velocity of the link's origin given the robot's current joint
velocities.
"""
return _robotsim.RobotModelLink_getVelocity(self)
def getAngularVelocity(self):
"""
Returns the angular velocity of the link given the robot's current joint
velocities.
"""
return _robotsim.RobotModelLink_getAngularVelocity(self)
def getPointVelocity(self, plocal):
"""
Returns the world velocity of the point given the robot's current velocity.
Args:
plocal (:obj:`list of 3 floats`)
"""
return _robotsim.RobotModelLink_getPointVelocity(self, plocal)
def getJacobian(self, p):
"""
Returns the 6xn total jacobian of the local point p (row-major matrix) w.r.t.
the robot's configuration q.
Args:
p (:obj:`list of 3 floats`)
(the orientation jacobian is stacked on position jacobian)
"""
return _robotsim.RobotModelLink_getJacobian(self, p)
def getPositionJacobian(self, p):
"""
Returns the 3xn jacobian of the local point p (row-major matrix) w.r.t. the
robot's configuration q.
Args:
p (:obj:`list of 3 floats`)
"""
return _robotsim.RobotModelLink_getPositionJacobian(self, p)
def getOrientationJacobian(self):
"""
Returns the 3xn orientation jacobian of the link (row-major matrix) w.r.t. the
robot's configuration q.
"""
return _robotsim.RobotModelLink_getOrientationJacobian(self)
def getAcceleration(self, ddq):
"""
Returns the acceleration of the link origin given the robot's current joint
velocities and joint accelerations ddq.
Args:
ddq (:obj:`list of floats`)
ddq can be empty, which calculates the acceleration with acceleration 0, and is
a little faster than setting ddq to [0]*n
"""
return _robotsim.RobotModelLink_getAcceleration(self, ddq)
def getPointAcceleration(self, plocal, ddq):
"""
Returns the acceleration of the point given the robot's current joint velocities
and joint accelerations ddq.
Args:
plocal (:obj:`list of 3 floats`)
ddq (:obj:`list of floats`)
"""
return _robotsim.RobotModelLink_getPointAcceleration(self, plocal, ddq)
def getAngularAcceleration(self, ddq):
"""
Returns the angular acceleration of the link given the robot's current joint
velocities and joint accelerations ddq.
Args:
ddq (:obj:`list of floats`)
"""
return _robotsim.RobotModelLink_getAngularAcceleration(self, ddq)
def getPositionHessian(self, p):
"""
Returns the Hessians of each component of the position p w.r.t the robot's
configuration q. The result is a triple of nxn matrices corresponding to the
(x,y,z) components respectively.
Args:
p (:obj:`list of 3 floats`)
"""
return _robotsim.RobotModelLink_getPositionHessian(self, p)
def getOrientationHessian(self):
"""
Returns the Hessians of each orientation component of the link w.r.t the robot's
configuration q. The result is a triple of nxn matrices corresponding to the
(wx,wy,wz) components respectively.
"""
return _robotsim.RobotModelLink_getOrientationHessian(self)
def drawLocalGL(self, keepAppearance=True):
"""
Draws the link's geometry in its local frame. If keepAppearance=true, the
current Appearance is honored. Otherwise, just the geometry is drawn.
drawLocalGL (keepAppearance=True)
drawLocalGL ()
Args:
keepAppearance (bool, optional): default value True
"""
return _robotsim.RobotModelLink_drawLocalGL(self, keepAppearance)
def drawWorldGL(self, keepAppearance=True):
"""
Draws the link's geometry in the world frame. If keepAppearance=true, the
current Appearance is honored. Otherwise, just the geometry is drawn.
drawWorldGL (keepAppearance=True)
drawWorldGL ()
Args:
keepAppearance (bool, optional): default value True
"""
return _robotsim.RobotModelLink_drawWorldGL(self, keepAppearance)
__swig_setmethods__["world"] = _robotsim.RobotModelLink_world_set
__swig_getmethods__["world"] = _robotsim.RobotModelLink_world_get
if _newclass:
world = _swig_property(_robotsim.RobotModelLink_world_get, _robotsim.RobotModelLink_world_set)
__swig_setmethods__["robotIndex"] = _robotsim.RobotModelLink_robotIndex_set
__swig_getmethods__["robotIndex"] = _robotsim.RobotModelLink_robotIndex_get
if _newclass:
robotIndex = _swig_property(_robotsim.RobotModelLink_robotIndex_get, _robotsim.RobotModelLink_robotIndex_set)
__swig_setmethods__["robotPtr"] = _robotsim.RobotModelLink_robotPtr_set
__swig_getmethods__["robotPtr"] = _robotsim.RobotModelLink_robotPtr_get
if _newclass:
robotPtr = _swig_property(_robotsim.RobotModelLink_robotPtr_get, _robotsim.RobotModelLink_robotPtr_set)
__swig_setmethods__["index"] = _robotsim.RobotModelLink_index_set
__swig_getmethods__["index"] = _robotsim.RobotModelLink_index_get
if _newclass:
index = _swig_property(_robotsim.RobotModelLink_index_get, _robotsim.RobotModelLink_index_set)
__swig_destroy__ = _robotsim.delete_RobotModelLink
__del__ = lambda self: None
RobotModelLink_swigregister = _robotsim.RobotModelLink_swigregister
RobotModelLink_swigregister(RobotModelLink)
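# Illustrative use of the link accessors documented above (the world file and
# link name are assumptions, not shipped with this module):
#
#     import klampt
#     world = klampt.WorldModel()
#     world.readFile("my_robot.xml")
#     link = world.robot(0).link("wrist")
#     R, t = link.getTransform()                  # current world pose of the link
#     J = link.getPositionJacobian([0, 0, 0])     # 3xn jacobian of the link origin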
class RobotModelDriver(_object):
"""
A reference to a driver of a RobotModel.
A driver corresponds to one of the robot's actuators and encodes how its forces
are transmitted to joints.
A RobotModelDriver is not created by hand, but instead accessed using
:meth:`RobotModel.driver` (index or name)
C++ includes: robotmodel.h
"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, RobotModelDriver, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, RobotModelDriver, name)
__repr__ = _swig_repr
def __init__(self):
"""
Returns:
(:obj:`RobotModelDriver`):
"""
this = _robotsim.new_RobotModelDriver()
try:
self.this.append(this)
except Exception:
self.this = this
def getName(self):
"""
Returns:
(str):
"""
return _robotsim.RobotModelDriver_getName(self)
def robot(self):
"""
Returns a reference to the driver's robot.
Returns:
(:class:`~klampt.RobotModel`):
"""
return _robotsim.RobotModelDriver_robot(self)
def getType(self):
"""
Currently can be "normal", "affine", "rotation", "translation", or
"custom".
Returns:
(str):
"""
return _robotsim.RobotModelDriver_getType(self)
def getAffectedLink(self):
"""
Returns the single affected link for "normal" links.
Returns:
(int):
"""
return _robotsim.RobotModelDriver_getAffectedLink(self)
def getAffectedLinks(self, links):
"""
Returns the driver's affected links.
Args:
links (:obj:`list of int`)
"""
return _robotsim.RobotModelDriver_getAffectedLinks(self, links)
def getAffineCoeffs(self, scale, offset):
"""
For "affine" links, returns the scale and offset of the driver value mapped to
the world.
Args:
scale (:obj:`list of floats`)
offset (:obj:`list of floats`)
"""
return _robotsim.RobotModelDriver_getAffineCoeffs(self, scale, offset)
def setValue(self, val):
"""
Sets the robot's config to correspond to the given driver value.
Args:
val (float)
"""
return _robotsim.RobotModelDriver_setValue(self, val)
def getValue(self):
"""
Gets the current driver value from the robot's config.
Returns:
(float):
"""
return _robotsim.RobotModelDriver_getValue(self)
def setVelocity(self, val):
"""
Sets the robot's velocity to correspond to the given driver velocity value.
Args:
val (float)
"""
return _robotsim.RobotModelDriver_setVelocity(self, val)
def getVelocity(self):
"""
Gets the current driver velocity value from the robot's velocity.
Returns:
(float):
"""
return _robotsim.RobotModelDriver_getVelocity(self)
__swig_setmethods__["world"] = _robotsim.RobotModelDriver_world_set
__swig_getmethods__["world"] = _robotsim.RobotModelDriver_world_get
if _newclass:
world = _swig_property(_robotsim.RobotModelDriver_world_get, _robotsim.RobotModelDriver_world_set)
__swig_setmethods__["robotIndex"] = _robotsim.RobotModelDriver_robotIndex_set
__swig_getmethods__["robotIndex"] = _robotsim.RobotModelDriver_robotIndex_get
if _newclass:
robotIndex = _swig_property(_robotsim.RobotModelDriver_robotIndex_get, _robotsim.RobotModelDriver_robotIndex_set)
__swig_setmethods__["robotPtr"] = _robotsim.RobotModelDriver_robotPtr_set
__swig_getmethods__["robotPtr"] = _robotsim.RobotModelDriver_robotPtr_get
if _newclass:
robotPtr = _swig_property(_robotsim.RobotModelDriver_robotPtr_get, _robotsim.RobotModelDriver_robotPtr_set)
__swig_setmethods__["index"] = _robotsim.RobotModelDriver_index_set
__swig_getmethods__["index"] = _robotsim.RobotModelDriver_index_get
if _newclass:
index = _swig_property(_robotsim.RobotModelDriver_index_get, _robotsim.RobotModelDriver_index_set)
__swig_destroy__ = _robotsim.delete_RobotModelDriver
__del__ = lambda self: None
RobotModelDriver_swigregister = _robotsim.RobotModelDriver_swigregister
RobotModelDriver_swigregister(RobotModelDriver)
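# Sketch of the driver interface documented above, reusing the `world` from the
# previous sketch (driver index and target value are assumptions):
#
#     driver = world.robot(0).driver(0)   # see RobotModel.driver
#     if driver.getType() == "normal":
#         driver.setValue(0.5)            # writes through to the robot's config
#     print(driver.getValue())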
class RobotModel(_object):
"""
A model of a dynamic and kinematic robot.
Stores both constant information, like the reference placement of the links,
joint limits, velocity limits, etc, as well as a *current configuration* and
*current velocity* which are state-dependent. Several functions depend on the
robot's current configuration and/or velocity. To update that, use the
    setConfig() and setVelocity() functions. setConfig() also updates the robot's
link transforms via forward kinematics. You may also use setDOFPosition and
setDOFVelocity for individual changes, but this is more expensive because each
    call updates all of the affected link transforms.
It is important to understand that changing the configuration of the model
doesn't actually send a command to the physical / simulated robot. Moreover, the
model does not automatically get updated when the physical / simulated robot
moves. In essence, the model maintains temporary storage for performing
kinematics, dynamics, and planning computations, as well as for visualization.
The state of the robot is retrieved using getConfig/getVelocity calls, and is
set using setConfig/setVelocity. Because many routines change the robot's
configuration, like IK and motion planning, a common design pattern is to
save/restore the configuration as follows::
q = robot.getConfig()
do some stuff that may touch the robot's configuration...
robot.setConfig(q)
The model maintains configuration/velocity/acceleration/torque bounds. However,
these are not enforced by the model, so you can happily set configurations
    outside the limits; valid values must rather be enforced by the planner / simulator.
C++ includes: robotmodel.h
"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, RobotModel, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, RobotModel, name)
__repr__ = _swig_repr
def __init__(self):
"""
Returns:
(:class:`~klampt.RobotModel`):
"""
this = _robotsim.new_RobotModel()
try:
self.this.append(this)
except Exception:
self.this = this
def loadFile(self, fn):
"""
Loads the robot from a file.
Args:
fn (str)
Returns:
(bool):
"""
return _robotsim.RobotModel_loadFile(self, fn)
def saveFile(self, fn, geometryPrefix=None):
"""
Saves the robot. If geometryPrefix == NULL, the geometry is not saved (default).
Otherwise, the geometry of each link will be saved to files named
geometryPrefix+name, where name is either the name of the geometry file that was
loaded, or [link_name].off.
saveFile (fn,geometryPrefix=None): bool
saveFile (fn): bool
Args:
fn (str):
geometryPrefix (str, optional): default value None
Returns:
(bool):
"""
return _robotsim.RobotModel_saveFile(self, fn, geometryPrefix)
def getID(self):
"""
Returns the ID of the robot in its world (Note: not the same as the robot index)
Returns:
(int):
"""
return _robotsim.RobotModel_getID(self)
def getName(self):
"""
Returns:
(str):
"""
return _robotsim.RobotModel_getName(self)
def setName(self, name):
"""
Args:
name (str)
"""
| |
import math
import random
import time
keepgoin = 'yes'
xwin = 0
owin = 0
currentplay = ''
def printboard():
print(' ')
print(' | |')
    print(' {spot1} | {spot2} | {spot3}'.format(spot1 = spots[0], spot2 = spots[1], spot3 = spots[2]))
print(' | |')
print(' TTTTTTTTTTTTTTTTT')
print(' | |')
    print(' {spot1} | {spot2} | {spot3}'.format(spot1 = spots[3], spot2 = spots[4], spot3 = spots[5]))
print(' | |')
print(' TTTTTTTTTTTTTTTTT')
print(' | |')
    print(' {spot1} | {spot2} | {spot3}'.format(spot1 = spots[6], spot2 = spots[7], spot3 = spots[8]))
print(' | |')
def ifwon(spotis, player, add):
global xwin
global owin
    # The two remaining cells of every winning line that passes through the
    # spot just played (spotis is 1-based, the pairs are 0-based board indices).
    lines_through_spot = {
        1: [(2, 1), (4, 8), (3, 6)],
        2: [(2, 0), (4, 7)],
        3: [(1, 0), (4, 6), (5, 8)],
        4: [(0, 6), (4, 5)],
        5: [(0, 8), (1, 7), (2, 6), (5, 3)],
        6: [(4, 3), (2, 8)],
        7: [(3, 0), (4, 2), (7, 8)],
        8: [(1, 4), (6, 8)],
        9: [(0, 4), (2, 5), (6, 7)],
    }
    mark = ' ' + player + ' '
    for a, b in lines_through_spot.get(spotis, []):
        if spots[a] == spots[b] == mark:
            if player == 'X':
                if add == 'yes':
                    xwin += 1
                return 1
            else:
                if add == 'yes':
                    owin += 1
                return 2
    return 0
def computerplay():
global currentplay
global spotnow
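    # Strategy sketch: scan the empty cells once; take any cell that would
    # complete a line for the computer, otherwise block a cell that would
    # complete a line for the human player, otherwise fall through to a
    # random empty cell below.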
emptycount = []
count = 0
dontdo = 0
for thing in spots:
if thing == ' ':
emptycount.append(count)
count += 1
for nothing in emptycount:
if dontdo == 0:
if ifwon(nothing + 1, computer, 'no') == 1 or ifwon(nothing + 1, computer, 'no') == 2:
spotnow = nothing + 1
dontdo = 1
if dontdo == 0:
if ifwon(nothing + 1, player, 'no') == 1 or ifwon(nothing + 1, player, 'no') == 2:
spotnow = nothing + 1
dontdo = 1
    if dontdo == 0:
        spotnow = random.choice(emptycount) + 1
    spots[spotnow - 1] = ' ' + computer + ' '
def board_is_full():
    return (spots[0] != ' ' and
spots[1] | |
<filename>python/lsst/eotask_gen3/eoCalibTable.py
""" Base classes for Electrical Optical (EO) calibration data
These classes define the interface between the transient data classes used
in EO test code, and the `astropy.table.Table` classes used for persistent
storage.
Specifically, they provide ways to define table structures in schemas, and then
use those schemas to facilitate backwards compatibility.
"""
import sys
from typing import Mapping
from collections import OrderedDict
from astropy.table import Table, Column
__all__ = ["EoCalibField", "EoCalibTableSchema", "EoCalibTable", "EoCalibTableHandle"]
class EoCalibField:
""" Defines a single field and provide the information needed to connect
a Class attribute (e.g., self.aVariable) to an
`astropy.table.Column` (e.g., 'VARIABLE')
Parameters
----------
name : `str`
Name of the column. In UPPER by convention.
dtype : `type`
Data type for elements in the Column. E.g., `float` or `int`.
shape : `list`
Define the shape of each element in the Column.
List elements can be either `int` or `str`
        `str` elements will be replaced with `int` at construction using keywords
kwds : `dict` [`str`, `Any`]
These will be passed to `astropy.table.Column` constructor.
Notes
-----
    This class should be used as a class attribute when defining table schema
    classes, i.e., it should only ever appear as a class attribute in a
sub-class of `EoCalibTableSchema`
"""
@staticmethod
def _format_shape(shape, **kwargs):
""" Format the list of shape elements, replacing any `str` using
keywords
Parameters
----------
shape : `list`
Define the shape of each element in the Column.
kwargs : `dict` [`str`, `Any`]
Used to replace the str elements in shape
Returns
-------
outShape : `tuple` [`int`]
The shape of each element in the column
"""
outShape = []
for axis in shape:
if isinstance(axis, int):
outShape.append(axis)
continue
if isinstance(axis, str):
try:
outShape.append(kwargs[axis])
continue
except KeyError as msg: # pragma: no cover
raise KeyError("Failed to convert EoCalibField column shape %s." % str(shape)) from msg
raise TypeError("Axis shape items must be either int or str, not %s" % type(axis)) # pragma: no cover # noqa
return tuple(outShape)
def __init__(self, **kwargs):
""" C'tor, Fills class parameters """
kwcopy = kwargs.copy()
self._name = kwcopy.pop('name')
self._dtype = kwcopy.pop('dtype', float)
self._shape = kwcopy.pop('shape', [1])
self._kwds = kwcopy
@property
def name(self):
""" Name of the Column. """
return self._name
@property
def dtype(self):
""" Data type for elements in the Column. """
return self._dtype
@property
def shape(self):
""" Template shape of each element in the Column. """
return self._shape
@property
def kwds(self):
""" Remaining keywords passed to Column constructor. """
return self._kwds
def validateColumn(self, column):
""" Check that a column matches the definition.
Raises
------
ValueError : Column data type does not match definition.
"""
if 'unit' in self._kwds:
column.unit = self._kwds['unit']
if 'description' in self._kwds:
column.description = self._kwds['description']
# if column.dtype.type != self._dtype:
# raise ValueError("Column %s data type not equal to schema data type %s != %s" % # noqa
# (column.name, column.dtype.type, self._dtype))
def validateValue(self, value):
""" Check that a value matches the definition and can be
used to fill a column.
Raises
------
ValueError : value data type does not match definition.
"""
# if value.dtype.type != self._dtype:
# raise ValueError("Item %s data type not equal to schema data type %s != %s" % # noqa
# (self._name, type(value), self._dtype))
def makeColumn(self, **kwargs):
""" Construct and return an `astropy.table.Column`
Notes
-----
        Uses keyword arguments in two ways:
        1. Replace string in shape template
        2. `length` is used to set column length
"""
return Column(name=self._name, dtype=self._dtype,
shape=self._format_shape(self._shape, **kwargs),
length=kwargs.get('length', 0),
**self._kwds)
def convertToValue(self, column, **kwargs):
""" Return data from column as a `numpy.array`
Keywords
--------
validate : `bool`
If true, will validate the column
"""
if kwargs.get('validate', False): # pragma: no cover
self.validateColumn(column)
return column.data
def convertToColumn(self, value, **kwargs):
""" Construct and return an `astropy.table.Column` from value.
Keywords
--------
validate : `bool`
If true, will validate the value
"""
if kwargs.get('validate', False): # pragma: no cover
self.validateValue(value)
return Column(name=self._name, dtype=self._dtype,
data=value, **self._kwds)
def writeMarkdownLine(self, varName, stream=sys.stdout):
""" Write a line of markdown describing self to stream
Parameters
----------
varName : `str`
The name of the variable associated to this field.
"""
md_dict = dict(varName=varName,
name=self._name,
dtype=self.dtype.__name__,
shape=self.shape,
unit="", description="")
md_dict.update(self._kwds)
tmpl = "| {varName} | {name} | {dtype} | {shape} | {unit} | {description} | \n".format(**md_dict)
stream.write(tmpl)
def copy(self, **kwargs):
""" Return an udpated copy of self using keyword to override fields """
kwcopy = dict(name=self._name, dtype=self._dtype, shape=self._shape)
kwcopy.update(self.kwds)
kwcopy.update(kwargs)
return EoCalibField(**kwcopy)
class EoCalibTableSchema:
""" Stores schema for a single `astropy.table.Table`
Each sub-class will define one version of the schema.
The naming convention for the sub-classes is:
{DataClassName}SchemaV{VERSION} e.g., 'EoTableDataSchemaV0'
Parameters
----------
TABLELENGTH : `str`
Name of the keyword to use to extract table length
fieldDict : `OrderedDict` [`str`, `EoCalibField`]
Maps field names (e.g., 'aVariable') to EoCalibField objects
columnDict : `OrderedDict` [`str`, `str`]
Maps column names (e.g., 'VARIABLE') to field names
"""
TABLELENGTH = ""
@classmethod
def findFields(cls):
""" Find and return the EoCalibField objects in a class
Returns
-------
fields : `OrderedDict` [`str`, `EoCalibField`]
"""
theClasses = cls.mro()
fields = OrderedDict()
for theClass in theClasses:
for key, val in theClass.__dict__.items():
if isinstance(val, EoCalibField):
fields[key] = val
return fields
@classmethod
def fullName(cls):
""" Return the name of this class """
return cls.__name__
@classmethod
def version(cls):
""" Return the version number of this schema
This relies on the naming convention: {DataClassName}SchemaV{VERSION}
"""
cStr = cls.__name__
return int(cStr[cStr.find("SchemaV")+7:])
@classmethod
def dataClassName(cls):
""" Return the name of the associated data class
This relies on the naming convention: {DataClassName}SchemaV{VERSION}
"""
cStr = cls.__name__
return cStr[:cStr.find("SchemaV")]
def __init__(self):
""" C'tor, Fills class parameters """
self._fieldDict = self.findFields()
self._columnDict = OrderedDict([(val.name, key) for key, val in self._fieldDict.items()])
def validateTable(self, table):
""" Check that table matches this schema
Raises
------
KeyError : Columns names in table do not match schema
"""
unused = {key: True for key in self._fieldDict.keys()}
for col in table.columns:
try:
key = self._columnDict[col]
field = self._fieldDict[key]
unused.pop(key, None)
except KeyError as msg: # pragma: no cover
raise KeyError("Column %s in table is not defined in schema %s" %
(col.name, type(self))) from msg
field.validateColumn(table[col])
if unused: # pragma: no cover
raise KeyError("%s.validateTable() failed because some columns were not provided %s" %
(type(self), str(unused)))
def validateDict(self, dictionary):
""" Check that dictionary matches this schema
Raises
------
KeyError : dictionary keys in table do not match schema
"""
unused = {key: True for key in self._fieldDict.keys()}
for key, val in dictionary.items():
if key == 'meta':
continue
try:
field = self._fieldDict[key]
unused.pop(key, None)
except KeyError as msg: # pragma: no cover
raise KeyError("Column %s in table is not defined in schema %s" %
(key, type(self))) from msg
field.validateValue(val)
if unused: # pragma: no cover
raise KeyError("%s.validateDict() failed because some columns were not provided %s" %
(type(self), str(unused)))
def makeTable(self, **kwargs):
""" Make and return an `astropy.table.Table`
Notes
-----
keywords are used to define table length and element shapes
"""
kwcopy = kwargs.copy()
length = kwcopy.pop(self.TABLELENGTH, 0)
table = Table([val.makeColumn(length=length, **kwcopy) for val in self._fieldDict.values()])
table.meta['schema'] = self.fullName()
table.meta['name'] = kwcopy.pop('name', None)
table.meta['handle'] = kwcopy.pop('handle', None)
return table
def convertToTable(self, dictionary, **kwargs):
""" Convert dictionary to `astropy.table.Table` and return it
Keywords
--------
validate : `bool`
If true, will validate the columns
Raises
------
KeyError : dictionary keys in table do not match schema
"""
unused = {key: True for key in self._fieldDict.keys()}
columns = []
meta = None
for key, val in dictionary.items():
if key == 'meta':
meta = val
continue
try:
field = self._fieldDict[key]
unused.pop(key)
except KeyError as msg: # pragma: no cover
raise KeyError("Column %s in table is not defined in schema %s" %
(key, type(self))) from msg
            columns.append(field.convertToColumn(val, **kwargs))
if unused: # pragma: no cover
raise KeyError("%s.validateDict() failed because some columns were not provided %s" %
(type(self), str(unused)))
table = Table(columns)
if meta:
table.meta.update(meta)
return table
def convertToDict(self, table, **kwargs):
""" Convert table to `OrderedDict` and return it
Keywords
--------
validate : `bool`
If true, will validate the columns
Raises
------
KeyError : column names in table do not match schema
"""
unused | |
= Fp(e*c)
assert C_final == sx.n*G + sr.n*H
return True
def pedersen_vector_test():
x_arr, r_arr, C_arr = [], [], []
for _ in range(10):
x_elem = uint256_from_str(os.urandom(32))
C_elem, r_elem = make_pedersen_commitment(x_elem)
x_arr.append(x_elem)
C_arr.append(C_elem)
r_arr.append(r_elem)
getChallenge, getTranscript = make_honest_verifier_challenger()
prf = pedersen_vector_prover(C_arr, x_arr, r_arr, getChallenge)
assert pedersen_vector_verifier(C_arr, prf, getTranscript)
print("Pedersen vector correctness test complete!")
pedersen_vector_test()
"""
## Part 2. Arithmetic relations
Example: a more complicated discrete log proof
Zk{ (a, b): A=a*G, B=b*G, C = (a*(b-3)) * G }
First rewrite as:
Zk{ (a, b): A=a*G, B=b*G, (C + 3*A) = b*A) }
You need to implement a prover and verifier for the above scheme.
"""
def arith_prover(a, b, A, B, C, getChallenge=sha2, rnd_bytes=os.urandom):
"""
Params:
a and b are elements of Fp
A, B, C are Points
Returns:
prf, of the form (KA,KB,KC,sa,sb)
Must satisfy verify_proof2(A, B, C, prf)
Must be zero-knowledge
"""
assert a*G == A
assert b*G == B
assert (a*(b-3))*G == C
# TODO: fill in your code here (10 points)
# blinding factor
t_1 = uint256_from_str(rnd_bytes(32)) % order
t_2 = uint256_from_str(rnd_bytes(32)) % order
# commitment
KA = t_1*G
KB = t_2*G
KC = t_2*a*G
# Invoke the random oracle to receive a challenge
c = uint256_from_str(getChallenge(ser(KA) + ser(KB) + ser(KC)))
# response
sa = Fp(t_1 + c*a)
sb = Fp(t_2 + c*b)
return (KA,KB,KC,sa,sb)
def arith_verifier(A, B, C, prf, getTranscript=sha2, rnd_bytes=os.urandom):
(KA,KB,KC,sa,sb) = prf
assert type(KA) == type(KB) == type(KC) == Point
assert type(sa) == type(sb) == Fp
# TODO: fill in your code here (10 points)
c = uint256_from_str(getTranscript(ser(KA) + ser(KB) + ser(KC)))
assert sa.n *G == KA + c*A
assert sb.n *G == KB + c*B
assert sb.n *A == KC + c*(C+3*A)
return True
def arith_test():
# Randomly choose "a" and "b"
a = uint256_from_str(os.urandom(32))
b = uint256_from_str(os.urandom(32))
A = a*G
B = b*G
C = (a*(b-3)) * G
prf = arith_prover(a, b, A, B, C)
assert arith_verifier(A, B, C, prf)
print("Arithmetic Relation correctness test complete")
arith_test()
"""
## Part 3. OR composition
In this part you will need to combine two
Zk{ (a,b): A = a*G OR B = b*G }
without revealing which one it is you know.
The verifier is provided for you.
"""
def OR_prover(A, B, x, getChallenge=sha2, rnd_bytes=os.urandom):
assert x*G == A or x*G == B
# TODO: Fill your code in here (20 points)
if x*G == A:
# blinding factor
cb = uint256_from_str(rnd_bytes(32)) % order
sb = uint256_from_str(rnd_bytes(32)) % order
t_1 = uint256_from_str(rnd_bytes(32)) % order
# commitment
KA = t_1*G
KB = sb*G - B*cb
# Invoke the random oracle to receive a challenge
c = uint256_from_str(getChallenge(ser(KA) + ser(KB)))
# response
ca = (c - cb) % p
sa = Fp(t_1 + ca*x)
sb = Fp(sb)
return (KA, KB, sa, sb, ca, cb)
if x*G == B:
# blinding factor
ca = uint256_from_str(rnd_bytes(32)) % order
sa = uint256_from_str(rnd_bytes(32)) % order
t_2 = uint256_from_str(rnd_bytes(32)) % order
# commitment
KA = sa*G - A*ca
KB = t_2*G
# Invoke the random oracle to receive a challenge
c = uint256_from_str(getChallenge(ser(KA) + ser(KB)))
# response
cb = (c - ca) % p
sb = Fp(t_2 + cb*x)
sa = Fp(sa)
return (KA, KB, sa, sb, ca, cb)
def OR_verifier(A, B, prf, getTranscript=sha2):
(KA,KB,sa,sb,ca,cb) = prf
assert type(KA) is type(KB) is Point
assert type(sa) is type(sb) is Fp
# Check the challenges are correctly constrained
c = uint256_from_str(getTranscript(ser(KA) + ser(KB)))
assert (ca + cb) % p == c
# Check each proof the same way
assert sa.n *G == KA + ca*A
assert sb.n *G == KB + cb*B
return True
def OR_test1():
# Try first way
a = uint256_from_str(os.urandom(32))
A = a*G
B = random_point()
getChallenge, getTranscript = make_honest_verifier_challenger()
prf = OR_prover(A, B, a, getChallenge)
assert OR_verifier(A, B, prf, getTranscript)
print("OR composition correctness 1 test complete!")
def OR_test2():
# Try second way
b = uint256_from_str(os.urandom(32))
A = random_point()
B = b*G
getChallenge, getTranscript = make_honest_verifier_challenger()
prf = OR_prover(A, B, b, getChallenge)
assert OR_verifier(A, B, prf, getTranscript)
print("OR composition correctness 2 test complete!")
OR_test1()
OR_test2()
"""
## Part 4. Schnorr signature
We can write a Schnorr signature as:
SoK[m] { (x): X = x*G }
Similar to part 1, except the challenge is derived in part from the message.
"""
def schnorr_sign(x, m, getChallenge=sha2, rnd_bytes=os.urandom):
assert type(x) is bytes
assert type(m) is str
# TODO: Your code goes here (10 points)
# blinding factor
k = uint256_from_str(rnd_bytes(32)) % order
# commitment
K = k*G
# Invoke the random oracle to receive a challenge
c = uint256_from_str(getChallenge(ser(K) + sha2(m).hex()))
# response
s = Fp(k + c*uint256_from_str(x))
sig = bytes.fromhex(ser(K)) + uint256_to_str(int(s))
return sig
def schnorr_verify(X, m, sig, getTranscript=sha2):
assert type(X) is Point
    assert type(sig) is bytes and len(sig) == 65
(K,s) = deser(sig[:33].hex()), uint256_from_str(sig[33:])
c = uint256_from_str(getTranscript(ser(K) + sha2(m).hex()))
assert s *G == K + c*X
return True
def schnorr_test():
msg = "hello"
x = os.urandom(32)
X = uint256_from_str(x) * G
sig = schnorr_sign(x, msg)
assert schnorr_verify(X, msg, sig)
print("Schnorr Test complete")
schnorr_test()
"""
## Part 5. Range proofs
- Create a proof that C = g^a*h^r, and a is in the range [0,64).
Zk{ (a, r): C = g^a*h^r and 0 <= a <= 63 }
Hint: You can implement this by creating commitments to the binary expansion
of A, and then proving the following:
Zk{ (b0, b1, ... b4,b5, r0, r1, ..., r4, r5, r'): A = g^(b0 + 2*b1 + ... + 16*b4 + 32*b5)*g^(r0 + 2*r1 + ... + 16*r4 + 32*r5)*h(r')
and (C0 = g^(b0) h^r0) ....
and (C0 = g h^r0 OR C0 = h^r0) ... }
"""
def range_prover(a, r, C, getChallenge=sha2, rnd_bytes=os.urandom):
assert type(C) is Point
assert a*G + r*H == C
# TODO: fill in your code here (10 points)
    # Pedersen commitment
t_1 = uint256_from_str(rnd_bytes(32)) % order
t_2 = uint256_from_str(rnd_bytes(32)) % order
KC = t_1*G + t_2*H
c = uint256_from_str(getChallenge(ser(KC)))
sx = Fp(t_1 + c*a)
sr = Fp(t_2 + c*r)
# 6 OR proofs
assert a>=0 and a<=63
    [b5, b4, b3, b2, b1, b0] = [int(i) for i in (6-len(bin(a)[2:]))*"0"+bin(a)[2:]]
    if (r>=0) and (r<=63):
        [r5, r4, r3, r2, r1, r0] = [int(i) for i in (6-len(bin(r)[2:]))*"0"+bin(r)[2:]]
    else:
        [r5, r4, r3, r2, r1, r0] = [int(i) for i in bin(r)[-6:]]
    C0 = b0*G + r0*H
    C1 = b1*G + r1*H
C2 = b2*G + r2*H
C3 = b3*G + r3*H
C4 = b4*G + r4*H
C5 = b5*G + r5*H
p0 = OR_prover(C0, Fp(C0-G), r0)
p1 = OR_prover(C1, Fp(C1-G), r1)
p2 = OR_prover(C2, Fp(C2-G), r2)
p3 = OR_prover(C3, Fp(C3-G), r3)
p4 = OR_prover(C4, Fp(C4-G), r4)
p5 = OR_prover(C5, Fp(C5-G), r5)
# prove b0 + 2*b1 + ... + 16*b4 + 32*b5 == a
return (KC,sx,sr), C0, p0, C1, p1, C2, p2, C3, p3, C4, p4, C5, p5
def range_verifier(C, prf, getTranscript=sha2, rnd_bytes=os.urandom):
assert type(C) is Point
# TODO: fill in your code here (10 points)
(KC,sx,sr), C0, p0, C1, p1, C2, p2, C3, p3, C4, p4, C5, p5 = prf
    # assert Pedersen commitment
assert type(KC) == Point
assert type(sx) == type(sr) == Fp
c = uint256_from_str(getTranscript(ser(KC)))
assert sx.n *G + sr.n *H == KC + c*C
# assert 6 OR_proof
assert OR_verifier(C0, Fp(C0-G), p0)
assert OR_verifier(C1, Fp(C1-G), p1)
assert OR_verifier(C2, Fp(C2-G), p2)
assert OR_verifier(C3, Fp(C3-G), p3)
assert OR_verifier(C4, Fp(C4-G), p4)
assert OR_verifier(C5, Fp(C5-G), p5)
# assert b0 + 2*b1 + ... + 16*b4 + 32*b5 == a
return True
"""
## Part 6: Extractor and simulator
In this part, you will implement in code a portion of the security proof
for the discrete log proof scheme from the Preliminary.
"""
def dlog_extractor(A, Adv):
assert type(A) is Point
## Step 1: run the adversary once to generate a commit, challenge, and response.
## Generate random bytes on demand, and save the Commit and the Verifier's challenge.
##
# TODO: Fill your code in here
def _get_challenge(inp):
nonlocal __challenge
return __challenge
def _rnd_bytes(inp):
nonlocal __rnd_bytes
return __rnd_bytes
__challenge = os.urandom(32)
__rnd_bytes | |
== 4.0 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)"].windings[1].nominal_voltage == 0.48 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)"].feeder_name == "sourcebus_src"
assert m["tr(r:p4udt27-p4udt27lv)"].noload_loss == 0.6
assert m["tr(r:p4udt27-p4udt27lv)"].loadloss == 1.74408
assert m["tr(r:p4udt27-p4udt27lv)"].phase_shift == 0
assert m["tr(r:p4udt27-p4udt27lv)"].normhkva == 82.5
assert m["tr(r:p4udt27-p4udt27lv)"].from_element == "p4udt27"
assert m["tr(r:p4udt27-p4udt27lv)"].to_element == "p4udt27lv"
assert m["tr(r:p4udt27-p4udt27lv)"].reactances == [pytest.approx(3.240000000000000)]
assert m["tr(r:p4udt27-p4udt27lv)"].is_center_tap == 0
assert m["tr(r:p4udt27-p4udt27lv)"].windings[0].connection_type == "Y"
assert m["tr(r:p4udt27-p4udt27lv)"].windings[1].connection_type == "Y"
assert m["tr(r:p4udt27-p4udt27lv)"].windings[0].rated_power == 75.0 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)"].windings[1].rated_power == 75.0 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)"].windings[0].emergency_power == 112.5 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)"].windings[1].emergency_power == 112.5 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)"].windings[0].resistance == 0.87204
assert m["tr(r:p4udt27-p4udt27lv)"].windings[1].resistance == 0.87204
assert m["tr(r:p4udt27-p4udt27lv)"].windings[0].voltage_type == 0
assert m["tr(r:p4udt27-p4udt27lv)"].windings[1].voltage_type == 2
assert m["tr(r:p4udt27-p4udt27lv)"].windings[0].voltage_limit == None
assert m["tr(r:p4udt27-p4udt27lv)"].windings[1].voltage_limit == None
assert m["tr(r:p4udt27-p4udt27lv)"].windings[0].reverse_resistance == None
assert m["tr(r:p4udt27-p4udt27lv)"].windings[1].reverse_resistance == None
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[0].phase_windings[0].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[0].phase_windings[1].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[0].phase_windings[2].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[1].phase_windings[0].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[1].phase_windings[1].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[1].phase_windings[2].tap_position == 1.0
)
assert m["tr(r:p4udt27-p4udt27lv)"].windings[0].phase_windings[0].phase == "A"
assert m["tr(r:p4udt27-p4udt27lv)"].windings[0].phase_windings[1].phase == "B"
assert m["tr(r:p4udt27-p4udt27lv)"].windings[0].phase_windings[2].phase == "C"
assert m["tr(r:p4udt27-p4udt27lv)"].windings[1].phase_windings[0].phase == "A"
assert m["tr(r:p4udt27-p4udt27lv)"].windings[1].phase_windings[1].phase == "B"
assert m["tr(r:p4udt27-p4udt27lv)"].windings[1].phase_windings[2].phase == "C"
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[0].phase_windings[0].compensator_r == None
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[0].phase_windings[1].compensator_r == None
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[0].phase_windings[2].compensator_r == None
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[1].phase_windings[0].compensator_r == None
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[1].phase_windings[1].compensator_r == None
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[1].phase_windings[2].compensator_r == None
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[0].phase_windings[0].compensator_x == None
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[0].phase_windings[1].compensator_x == None
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[0].phase_windings[2].compensator_x == None
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[1].phase_windings[0].compensator_x == None
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[1].phase_windings[1].compensator_x == None
)
assert (
m["tr(r:p4udt27-p4udt27lv)"].windings[1].phase_windings[2].compensator_x == None
)
# Three phase Wye-Wye Transformer (modified) from 4kV SMART-DS region P4U
assert m["tr(r:p4udt27-p4udt27lv)_1"].name == "tr(r:p4udt27-p4udt27lv)_1"
assert (
len(m["tr(r:p4udt27-p4udt27lv)_1"].windings) == 2
) # Transformer should have 2 Windings
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].nominal_voltage == 4.0 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].nominal_voltage == 0.48 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_1"].feeder_name == "sourcebus_src"
assert m["tr(r:p4udt27-p4udt27lv)_1"].noload_loss == 0.6
assert m["tr(r:p4udt27-p4udt27lv)_1"].loadloss == 1.74408
assert m["tr(r:p4udt27-p4udt27lv)_1"].phase_shift == -30
assert m["tr(r:p4udt27-p4udt27lv)_1"].normhkva == 82.5
assert m["tr(r:p4udt27-p4udt27lv)_1"].from_element == "p4udt27"
assert m["tr(r:p4udt27-p4udt27lv)_1"].to_element == "p4udt27lv"
assert m["tr(r:p4udt27-p4udt27lv)_1"].reactances == [
pytest.approx(3.240000000000000)
]
assert m["tr(r:p4udt27-p4udt27lv)_1"].is_center_tap == 0
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].connection_type == "D"
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].connection_type == "Y"
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].rated_power == 75.0 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].rated_power == 75.0 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].emergency_power == 112.5 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].emergency_power == 112.5 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].resistance == 0.87204
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].resistance == 0.87204
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].voltage_type == 0
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].voltage_type == 2
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].voltage_limit == None
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].voltage_limit == None
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].reverse_resistance == None
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].reverse_resistance == None
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].phase_windings[0].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].phase_windings[1].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].phase_windings[2].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].phase_windings[0].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].phase_windings[1].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].phase_windings[2].tap_position == 1.0
)
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].phase_windings[0].phase == "A"
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].phase_windings[1].phase == "B"
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].phase_windings[2].phase == "C"
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].phase_windings[0].phase == "A"
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].phase_windings[1].phase == "B"
assert m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].phase_windings[2].phase == "C"
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].phase_windings[0].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].phase_windings[1].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].phase_windings[2].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].phase_windings[0].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].phase_windings[1].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].phase_windings[2].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].phase_windings[0].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].phase_windings[1].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[0].phase_windings[2].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].phase_windings[0].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].phase_windings[1].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_1"].windings[1].phase_windings[2].compensator_x
== None
)
# Three phase Wye-Wye Transformer (modified) from 4kV SMART-DS region P4U
assert m["tr(r:p4udt27-p4udt27lv)_2"].name == "tr(r:p4udt27-p4udt27lv)_2"
assert (
len(m["tr(r:p4udt27-p4udt27lv)_2"].windings) == 2
) # Transformer should have 2 Windings
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].nominal_voltage == 4.0 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].nominal_voltage == 0.48 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_2"].feeder_name == "sourcebus_src"
assert m["tr(r:p4udt27-p4udt27lv)_2"].noload_loss == 0.6
assert m["tr(r:p4udt27-p4udt27lv)_2"].loadloss == 1.74408
assert m["tr(r:p4udt27-p4udt27lv)_2"].phase_shift == 0
assert m["tr(r:p4udt27-p4udt27lv)_2"].normhkva == 82.5
assert m["tr(r:p4udt27-p4udt27lv)_2"].from_element == "p4udt27"
assert m["tr(r:p4udt27-p4udt27lv)_2"].to_element == "p4udt27lv"
assert m["tr(r:p4udt27-p4udt27lv)_2"].reactances == [
pytest.approx(3.240000000000000)
]
assert m["tr(r:p4udt27-p4udt27lv)_2"].is_center_tap == 0
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].connection_type == "D"
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].connection_type == "D"
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].rated_power == 75.0 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].rated_power == 75.0 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].emergency_power == 112.5 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].emergency_power == 112.5 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].resistance == 0.87204
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].resistance == 0.87204
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].voltage_type == 0
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].voltage_type == 2
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].voltage_limit == None
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].voltage_limit == None
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].reverse_resistance == None
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].reverse_resistance == None
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].phase_windings[0].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].phase_windings[1].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].phase_windings[2].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].phase_windings[0].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].phase_windings[1].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].phase_windings[2].tap_position == 1.0
)
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].phase_windings[0].phase == "A"
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].phase_windings[1].phase == "B"
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].phase_windings[2].phase == "C"
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].phase_windings[0].phase == "A"
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].phase_windings[1].phase == "B"
assert m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].phase_windings[2].phase == "C"
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].phase_windings[0].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].phase_windings[1].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].phase_windings[2].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].phase_windings[0].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].phase_windings[1].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].phase_windings[2].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].phase_windings[0].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].phase_windings[1].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[0].phase_windings[2].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].phase_windings[0].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].phase_windings[1].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_2"].windings[1].phase_windings[2].compensator_x
== None
)
# Three phase Wye-Wye Transformer (modified) from 4kV SMART-DS region P4U
assert m["tr(r:p4udt27-p4udt27lv)_3"].name == "tr(r:p4udt27-p4udt27lv)_3"
assert (
len(m["tr(r:p4udt27-p4udt27lv)_3"].windings) == 2
) # Transformer should have 2 Windings
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].nominal_voltage == 4.0 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].nominal_voltage == 0.48 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_3"].feeder_name == "sourcebus_src"
assert m["tr(r:p4udt27-p4udt27lv)_3"].noload_loss == 0.6
assert m["tr(r:p4udt27-p4udt27lv)_3"].loadloss == 1.74408
assert m["tr(r:p4udt27-p4udt27lv)_3"].phase_shift == -30
assert m["tr(r:p4udt27-p4udt27lv)_3"].normhkva == 82.5
assert m["tr(r:p4udt27-p4udt27lv)_3"].from_element == "p4udt27"
assert m["tr(r:p4udt27-p4udt27lv)_3"].to_element == "p4udt27lv"
assert m["tr(r:p4udt27-p4udt27lv)_3"].reactances == [
pytest.approx(3.240000000000000)
]
assert m["tr(r:p4udt27-p4udt27lv)_3"].is_center_tap == 0
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].connection_type == "Y"
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].connection_type == "D"
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].rated_power == 75.0 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].rated_power == 75.0 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].emergency_power == 112.5 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].emergency_power == 112.5 * 10 ** 3
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].resistance == 0.87204
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].resistance == 0.87204
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].voltage_type == 0
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].voltage_type == 2
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].voltage_limit == None
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].voltage_limit == None
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].reverse_resistance == None
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].reverse_resistance == None
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].phase_windings[0].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].phase_windings[1].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].phase_windings[2].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].phase_windings[0].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].phase_windings[1].tap_position == 1.0
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].phase_windings[2].tap_position == 1.0
)
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].phase_windings[0].phase == "A"
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].phase_windings[1].phase == "B"
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].phase_windings[2].phase == "C"
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].phase_windings[0].phase == "A"
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].phase_windings[1].phase == "B"
assert m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].phase_windings[2].phase == "C"
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].phase_windings[0].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].phase_windings[1].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].phase_windings[2].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].phase_windings[0].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].phase_windings[1].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].phase_windings[2].compensator_r
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].phase_windings[0].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].phase_windings[1].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[0].phase_windings[2].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].phase_windings[0].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].phase_windings[1].compensator_x
== None
)
assert (
m["tr(r:p4udt27-p4udt27lv)_3"].windings[1].phase_windings[2].compensator_x
== None
)
# Center Tap Transformer from 4kV SMART-DS region P4U
assert m["tr(r:p4udt25-p4udt25lv)"].name == "tr(r:p4udt25-p4udt25lv)"
assert (
len(m["tr(r:p4udt25-p4udt25lv)"].windings) == 3
) # Transformer tr(r:p4udt25-p4udt25lv) should have 3 Windings
assert m["tr(r:p4udt25-p4udt25lv)"].windings[0].nominal_voltage == 2.3094 * 10 ** 3
assert (
m["tr(r:p4udt25-p4udt25lv)"].windings[1].nominal_voltage == 0.120089 * 10 ** 3
)
assert (
m["tr(r:p4udt25-p4udt25lv)"].windings[2].nominal_voltage == 0.120089 * 10 ** 3
)
assert m["tr(r:p4udt25-p4udt25lv)"].feeder_name == "sourcebus_src"
assert m["tr(r:p4udt25-p4udt25lv)"].noload_loss == 0.472
assert m["tr(r:p4udt25-p4udt25lv)"].loadloss == 0.798816
assert m["tr(r:p4udt25-p4udt25lv)"].phase_shift == 0
assert m["tr(r:p4udt25-p4udt25lv)"].normhkva == 27.5
assert m["tr(r:p4udt25-p4udt25lv)"].from_element == "p4udt25"
assert m["tr(r:p4udt25-p4udt25lv)"].to_element == "p4udt25lv"
assert m["tr(r:p4udt25-p4udt25lv)"].reactances == [2.4, 1.6, 2.4]
assert m["tr(r:p4udt25-p4udt25lv)"].is_center_tap == 1
assert m["tr(r:p4udt25-p4udt25lv)"].windings[0].connection_type == "Y"
assert m["tr(r:p4udt25-p4udt25lv)"].windings[1].connection_type == "Y"
assert m["tr(r:p4udt25-p4udt25lv)"].windings[2].connection_type == "Y"
assert m["tr(r:p4udt25-p4udt25lv)"].windings[0].rated_power == 25.0 * 10 ** 3
assert m["tr(r:p4udt25-p4udt25lv)"].windings[1].rated_power == 25.0 * 10 ** 3
assert m["tr(r:p4udt25-p4udt25lv)"].windings[2].rated_power == 25.0 * 10 ** 3
assert m["tr(r:p4udt25-p4udt25lv)"].windings[0].emergency_power == 37.5 * 10 ** 3
assert m["tr(r:p4udt25-p4udt25lv)"].windings[1].emergency_power == 37.5 * 10 ** 3
assert m["tr(r:p4udt25-p4udt25lv)"].windings[2].emergency_power == 37.5 * 10 ** 3
assert m["tr(r:p4udt25-p4udt25lv)"].windings[0].resistance == 0.266272
assert m["tr(r:p4udt25-p4udt25lv)"].windings[1].resistance == 0.532544
assert m["tr(r:p4udt25-p4udt25lv)"].windings[2].resistance == 0.532544
assert m["tr(r:p4udt25-p4udt25lv)"].windings[0].voltage_type == 0
assert m["tr(r:p4udt25-p4udt25lv)"].windings[1].voltage_type == | |
to bits, include antipodal level shift to [-1,1]
>>> yn_soft = dc.cpx_awgn(2*y-1,EbN0-3,1) # Channel SNR is 3 dB less for rate 1/2
>>> yn_hard = ((np.sign(yn_soft.real)+1)/2).astype(int)
>>> z = cc1.viterbi_decoder(yn_hard,'hard')
>>> # Count bit errors
>>> bit_count, bit_errors = dc.bit_errors(x,z)
>>> total_bit_errors += bit_errors
>>> total_bit_count += bit_count
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
>>> print('*****************************************************')
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
Rate 1/2 Object
kmax = 0, taumax = 0
Bits Received = 9976, Bit errors = 77, BEP = 7.72e-03
kmax = 0, taumax = 0
Bits Received = 19952, Bit errors = 175, BEP = 8.77e-03
*****************************************************
Bits Received = 19952, Bit errors = 175, BEP = 8.77e-03
>>> # Consider the trellis traceback after the sim completes
>>> cc1.traceback_plot()
>>> plt.show()
>>> # Compare a collection of simulation results with soft decision
>>> # bounds
>>> SNRdB = np.arange(0,12,.1)
>>> Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
>>> Pb_s_third_3 = fec.conv_Pb_bound(1/3,8,[3, 0, 15],SNRdB,1)
>>> Pb_s_third_4 = fec.conv_Pb_bound(1/3,10,[6, 0, 6, 0],SNRdB,1)
>>> Pb_s_third_5 = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56],SNRdB,1)
>>> Pb_s_third_6 = fec.conv_Pb_bound(1/3,13,[1, 8, 26, 20, 19, 62],SNRdB,1)
>>> Pb_s_third_7 = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,1)
>>> Pb_s_third_8 = fec.conv_Pb_bound(1/3,16,[1, 0, 24, 0, 113, 0, 287, 0],SNRdB,1)
>>> Pb_s_half = fec.conv_Pb_bound(1/2,7,[4, 12, 20, 72, 225],SNRdB,1)
>>> plt.figure(figsize=(5,5))
>>> plt.semilogy(SNRdB,Pb_uc)
>>> plt.semilogy(SNRdB,Pb_s_third_3,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_4,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_5,'g')
>>> plt.semilogy(SNRdB,Pb_s_third_6,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_7,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_8,'--')
>>> plt.semilogy([0,1,2,3,4,5],[9.08e-02,2.73e-02,6.52e-03,\
8.94e-04,8.54e-05,5e-6],'gs')
>>> plt.axis([0,12,1e-7,1e0])
>>> plt.title(r'Soft Decision Rate 1/2 Coding Measurements')
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/3, K=3, Soft',\
'R=1/3, K=4, Soft','R=1/3, K=5, Soft',\
'R=1/3, K=6, Soft','R=1/3, K=7, Soft',\
'R=1/3, K=8, Soft','R=1/3, K=5, Sim', \
'Simulation'),loc='upper right')
>>> plt.grid();
>>> plt.show()
>>> # Hard decision rate 1/3 simulation
>>> N_bits_per_frame = 10000
>>> EbN0 = 3
>>> total_bit_errors = 0
>>> total_bit_count = 0
>>> cc2 = fec.FECConv(('11111','11011','10101'),25)
>>> # Encode with shift register starting state of '0000'
>>> state = '0000'
>>> while total_bit_errors < 100:
>>> # Create 100000 random 0/1 bits
>>> x = randint(0,2,N_bits_per_frame)
>>> y,state = cc2.conv_encoder(x,state)
>>> # Add channel noise to bits, include antipodal level shift to [-1,1]
>>> yn_soft = dc.cpx_awgn(2*y-1,EbN0-10*np.log10(3),1) # Channel SNR is 10*log10(3) dB less
>>> yn_hard = ((np.sign(yn_soft.real)+1)/2).astype(int)
>>> z = cc2.viterbi_decoder(yn_hard.real,'hard')
>>> # Count bit errors
>>> bit_count, bit_errors = dc.bit_errors(x,z)
>>> total_bit_errors += bit_errors
>>> total_bit_count += bit_count
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
>>> print('*****************************************************')
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
Rate 1/3 Object
kmax = 0, taumax = 0
Bits Received = 9976, Bit errors = 251, BEP = 2.52e-02
*****************************************************
Bits Received = 9976, Bit errors = 251, BEP = 2.52e-02
>>> # Compare a collection of simulation results with hard decision
>>> # bounds
>>> SNRdB = np.arange(0,12,.1)
>>> Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
>>> Pb_s_third_3_hard = fec.conv_Pb_bound(1/3,8,[3, 0, 15, 0, 58, 0, 201, 0],SNRdB,0)
>>> Pb_s_third_5_hard = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56, 0, 320, 0],SNRdB,0)
>>> Pb_s_third_7_hard = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,0)
>>> Pb_s_third_5_hard_sim = np.array([8.94e-04,1.11e-04,8.73e-06])
>>> plt.figure(figsize=(5,5))
>>> plt.semilogy(SNRdB,Pb_uc)
>>> plt.semilogy(SNRdB,Pb_s_third_3_hard,'r--')
>>> plt.semilogy(SNRdB,Pb_s_third_5_hard,'g--')
>>> plt.semilogy(SNRdB,Pb_s_third_7_hard,'k--')
>>> plt.semilogy(np.array([5,6,7]),Pb_s_third_5_hard_sim,'sg')
>>> plt.axis([0,12,1e-7,1e0])
>>> plt.title(r'Hard Decision Rate 1/3 Coding Measurements')
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/3, K=3, Hard',\
'R=1/3, K=5, Hard', 'R=1/3, K=7, Hard',\
),loc='upper right')
>>> plt.grid();
>>> plt.show()
>>> # Show the traceback for the rate 1/3 hard decision case
>>> cc2.traceback_plot()
"""
if metric_type == 'hard':
            # Hard-decision decoding requires 0/1 integer inputs (the other metric types accept floats)
if np.issubdtype(x.dtype, np.integer):
if x.max() > 1 or x.min() < 0:
raise ValueError('Integer bit values must be 0 or 1')
else:
raise ValueError('Decoder inputs must be integers on [0,1] for hard decisions')
# Initialize cumulative metrics array
cm_present = np.zeros((self.Nstates,1))
NS = len(x) # number of channel symbols to process;
# must be even for rate 1/2
# must be a multiple of 3 for rate 1/3
y = np.zeros(NS-self.decision_depth) # Decoded bit sequence
k = 0
symbolL = self.rate.denominator
# Calculate branch metrics and update traceback states and traceback bits
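        # Add-compare-select: for every trellis state there are exactly two
        # candidate predecessor branches. Each candidate's metric is its branch
        # metric plus the predecessor's cumulative metric; the smaller total
        # survives and its traceback history is extended by one step.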
for n in range(0,NS,symbolL):
cm_past = self.paths.cumulative_metric[:,0]
tb_states_temp = self.paths.traceback_states[:,:-1].copy()
tb_bits_temp = self.paths.traceback_bits[:,:-1].copy()
for m in range(self.Nstates):
d1 = self.bm_calc(self.branches.bits1[m],
x[n:n+symbolL],metric_type,
quant_level)
d1 = d1 + cm_past[self.branches.states1[m]]
d2 = self.bm_calc(self.branches.bits2[m],
x[n:n+symbolL],metric_type,
quant_level)
d2 = d2 + cm_past[self.branches.states2[m]]
if d1 <= d2: # Find the survivor assuming minimum distance wins
cm_present[m] = d1
self.paths.traceback_states[m,:] = np.hstack((self.branches.states1[m],
tb_states_temp[int(self.branches.states1[m]),:]))
self.paths.traceback_bits[m,:] = np.hstack((self.branches.input1[m],
tb_bits_temp[int(self.branches.states1[m]),:]))
else:
cm_present[m] = d2
self.paths.traceback_states[m,:] = np.hstack((self.branches.states2[m],
tb_states_temp[int(self.branches.states2[m]),:]))
self.paths.traceback_bits[m,:] = np.hstack((self.branches.input2[m],
tb_bits_temp[int(self.branches.states2[m]),:]))
# Update cumulative metric history
self.paths.cumulative_metric = np.hstack((cm_present,
self.paths.cumulative_metric[:,:-1]))
# Obtain estimate of input bit sequence from the oldest bit in
# the traceback having the smallest (most likely) cumulative metric
min_metric = min(self.paths.cumulative_metric[:,0])
min_idx = np.where(self.paths.cumulative_metric[:,0] == min_metric)
if n >= symbolL*self.decision_depth-symbolL: # 2 since Rate = 1/2
y[k] = self.paths.traceback_bits[min_idx[0][0],-1]
k += 1
y = y[:k] # trim final length
return y
def bm_calc(self,ref_code_bits, rec_code_bits, metric_type, quant_level):
"""
distance = bm_calc(ref_code_bits, rec_code_bits, metric_type)
Branch metrics calculation
<NAME> and <NAME> October 2018
"""
distance = 0
if metric_type == 'soft': # squared distance metric
bits = binary(int(ref_code_bits),self.rate.denominator)
for k in range(len(bits)):
ref_bit = (2**quant_level-1)*int(bits[k],2)
distance += (int(rec_code_bits[k]) - ref_bit)**2
elif metric_type == 'hard': # hard decisions
bits = binary(int(ref_code_bits),self.rate.denominator)
for k in range(len(rec_code_bits)):
distance += abs(rec_code_bits[k] - int(bits[k]))
elif metric_type == 'unquant': # unquantized
bits = binary(int(ref_code_bits),self.rate.denominator)
for k in range(len(bits)):
distance += (float(rec_code_bits[k])-float(bits[k]))**2
else:
warnings.warn('Invalid metric type specified')
raise ValueError('Invalid metric type specified. Use soft, hard, or unquant')
return distance
def conv_encoder(self,input,state):
"""
output, state = conv_encoder(input,state)
We get the 1/2 or 1/3 rate from self.rate
Polys G1 and G2 are entered as binary strings, e.g,
G1 = '111' and G2 = '101' for K = 3
G1 = '1011011' and G2 = '1111001' for K = 7
G3 is also included for rate 1/3
Input state as a binary string of length K-1, e.g., '00' or '0000000'
e.g., state = '00' for K = 3
e.g., state = '000000' for K = 7
<NAME> and <NAME> 2018
"""
output = []
if(self.rate == Fraction(1,2)):
for n in range(len(input)):
u1 = int(input[n])
u2 = int(input[n])
for m in range(1,self.constraint_length):
if int(self.G_polys[0][m]) == 1: # XOR if we have a connection
u1 = u1 ^ int(state[m-1])
if int(self.G_polys[1][m]) == 1: # XOR if we have a connection
u2 = u2 ^ int(state[m-1])
# G1 placed first, G2 placed second
output = np.hstack((output, [u1, u2]))
state = bin(int(input[n]))[-1] + state[:-1]
elif(self.rate == Fraction(1,3)):
for n in range(len(input)):
if(int(self.G_polys[0][0]) == 1):
u1 = int(input[n])
else:
u1 = 0
if(int(self.G_polys[1][0]) == 1):
u2 = int(input[n])
else:
u2 = 0
if(int(self.G_polys[2][0]) == 1):
u3 = int(input[n])
else:
u3 = 0
for m in range(1,self.constraint_length):
if int(self.G_polys[0][m]) == 1: # XOR if we have a connection
u1 = u1 ^ int(state[m-1])
if int(self.G_polys[1][m]) == 1: # XOR if we have a connection
u2 = u2 ^ int(state[m-1])
if int(self.G_polys[2][m]) == 1: # XOR if we have a connection
u3 = u3 ^ int(state[m-1])
# G1 placed first, G2 placed second, G3 placed third
output = np.hstack((output, [u1, u2, u3]))
state = bin(int(input[n]))[-1] + state[:-1]
return output, state
def puncture(self,code_bits,puncture_pattern = ('110','101')):
"""
        Apply puncturing to the serial bits produced by convolutional encoding.
:param code_bits:
:param puncture_pattern:
:return:
Examples
--------
This example uses the following puncture matrix:
.. math::
\\begin{align*}
\\mathbf{A} = \\begin{bmatrix}
1 & 1 & 0 \\\\
1 & 0 & 1
\\end{bmatrix}
\\end{align*}
The upper row operates on the outputs for the :math:`G_{1}` polynomial and the lower row operates on the outputs of
the :math:`G_{2}` | |
activity: %s' % task)
return False
except models.ValidationError as e:
errors.append(str(e))
return False
except Exception as e:
errors.append('Unable to convert: %s, Error: %s' % (task, e))
return False
return True
def copy_to_lesson_13(
src_unit, src_lesson, dst_unit, dst_lesson, now_available,
errors):
dst_lesson.objectives = src_lesson.objectives
dst_lesson.video = src_lesson.video
            if src_lesson.audio:
dst_lesson.audio = src_lesson.audio
else:
dst_lesson.audio = ''
dst_lesson.notes = src_lesson.notes
dst_lesson.duration = src_lesson.duration
dst_lesson.activity_listed = False
dst_lesson.now_available = now_available
# Copy over the activity. Note that we copy files directly and
# avoid all logical validations of their content. This is done for a
# purpose - at this layer we don't care what is in those files.
if src_lesson.activity:
# create a lesson with activity
if src_lesson.activity_title:
title = src_lesson.activity_title
else:
title = 'Activity'
lesson_w_activity = self.add_lesson(dst_unit, title)
lesson_w_activity.auto_index = False
lesson_w_activity.activity_listed = False
lesson_w_activity.now_available = now_available
src_filename = os.path.join(
src_course.app_context.get_home(),
src_course.get_activity_filename(
src_unit.unit_id, src_lesson.lesson_id))
if src_course.app_context.fs.isfile(src_filename):
text = src_course.app_context.fs.get(src_filename)
import_lesson12_activities(
text, dst_unit, lesson_w_activity, src_lesson.title,
errors)
def copy_lesson12_into_lesson13(
src_unit, src_lesson, dst_unit, dst_lesson, errors):
copy_to_lesson_13(
src_unit, src_lesson, dst_unit, dst_lesson, True, errors)
dst_lesson.now_available = True
def copy_lesson13_into_lesson13(
src_unit, src_lesson, dst_unit, dst_lesson, errors):
copy_to_lesson_13(
src_unit, src_lesson, dst_unit, dst_lesson,
src_lesson.now_available, errors)
dst_lesson.now_available = src_lesson.now_available
dst_lesson.scored = src_lesson.scored
dst_lesson.paid = src_lesson.paid
dst_lesson.properties = src_lesson.properties
def _copy_entities_between_namespaces(entity_types, from_ns, to_ns):
"""Copies entities between different namespaces."""
def _mapper_func(entity, unused_ns):
_add_entity_instance_to_a_namespace(
to_ns, entity.__class__, entity.key().id_or_name(),
entity.data)
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(from_ns)
for _entity_class in entity_types:
mapper = utils.QueryMapper(
_entity_class.all(), batch_size=DEFAULT_FETCH_LIMIT,
report_every=0)
mapper.run(_mapper_func, from_ns)
finally:
namespace_manager.set_namespace(old_namespace)
def _add_entity_instance_to_a_namespace(
ns, entity_class, _id_or_name, data):
"""Add new entity to the datastore of and a given namespace."""
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(ns)
new_key = db.Key.from_path(entity_class.__name__, _id_or_name)
new_instance = entity_class(key=new_key)
new_instance.data = data
new_instance.put()
finally:
namespace_manager.set_namespace(old_namespace)
# check editable
if not self._app_context.is_editable_fs():
errors.append(
'Target course %s must be '
'on read-write media.' % self.app_context.raw)
return None, None
# check empty
if self.get_units():
errors.append(
'Target course %s must be empty.' % self.app_context.raw)
return None, None
# import course settings
dst_settings = self.app_context.get_environ()
src_settings = src_course.app_context.get_environ()
dst_settings = deep_dict_merge(dst_settings, src_settings)
if not self.save_settings(dst_settings):
errors.append('Failed to import course settings.')
return None, None
# iterate over course structure and assets and import each item
with Namespace(self.app_context.get_namespace_name()):
for unit in src_course.get_units():
# import unit
new_unit = self.add_unit(unit.type, unit.title)
if src_course.version == CourseModel13.VERSION:
copy_unit13_into_unit13(unit, new_unit, src_course, errors)
elif src_course.version == CourseModel12.VERSION:
copy_unit12_into_unit13(unit, new_unit, errors)
else:
raise Exception(
                        'Unsupported course version: %s' % src_course.version)
# import contained lessons
for lesson in src_course.get_lessons(unit.unit_id):
new_lesson = self.add_lesson(new_unit, lesson.title)
if src_course.version == CourseModel13.VERSION:
copy_lesson13_into_lesson13(
unit, lesson, new_unit, new_lesson, errors)
elif src_course.version == CourseModel12.VERSION:
copy_lesson12_into_lesson13(
unit, lesson, new_unit, new_lesson, errors)
else:
raise Exception(
                            'Unsupported course version: %s' % src_course.version)
# assign weights to assignments imported from version 12
if src_course.version == CourseModel12.VERSION:
if self.get_assessments():
w = common_utils.truncate(
100.0 / len(self.get_assessments()))
for x in self.get_assessments():
x.weight = w
# import course dependencies from the datastore
_copy_entities_between_namespaces(
list(COURSE_CONTENT_ENTITIES) + list(
ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT),
src_course.app_context.get_namespace_name(),
self.app_context.get_namespace_name())
return src_course, self
def to_json(self):
"""Creates JSON representation of this instance."""
persistent = PersistentCourse13(
next_id=self._next_id, units=self._units, lessons=self._lessons)
return transforms.dumps(
persistent.to_dict(),
indent=4, sort_keys=True,
default=lambda o: o.__dict__)
class Workflow(object):
"""Stores workflow specifications for assessments."""
def __init__(self, yaml_str):
"""Sets yaml_str (the workflow spec), without doing any validation."""
self._yaml_str = yaml_str
def to_yaml(self):
return self._yaml_str
def to_dict(self):
if not self._yaml_str:
return {}
obj = yaml.safe_load(self._yaml_str)
assert isinstance(obj, dict)
return obj
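# Example (illustrative): a minimal workflow spec and its parsed form. The
# exact key strings come from the *_KEY constants defined elsewhere in this
# module; the literal 'grader: auto' below is an assumption for illustration.
#   Workflow('grader: auto').to_dict()  # -> {'grader': 'auto'}
#   Workflow('').to_dict()              # -> {}
#   Workflow('grader: auto').to_yaml()  # -> 'grader: auto'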
def _convert_date_string_to_datetime(self, date_str):
"""Returns a datetime object."""
if not date_str:
return None
return datetime.strptime(date_str, ISO_8601_DATE_FORMAT)
def get_grader(self):
"""Returns the associated grader."""
return self.to_dict().get(GRADER_KEY)
def get_matcher(self):
return self.to_dict().get(MATCHER_KEY)
def get_submission_due_date(self):
date_str = self.to_dict().get(SUBMISSION_DUE_DATE_KEY)
if date_str is None:
return None
return self._convert_date_string_to_datetime(date_str)
def get_review_due_date(self):
date_str = self.to_dict().get(REVIEW_DUE_DATE_KEY)
if date_str is None:
return None
return self._convert_date_string_to_datetime(date_str)
def get_review_min_count(self):
return self.to_dict().get(REVIEW_MIN_COUNT_KEY)
def get_review_window_mins(self):
return self.to_dict().get(REVIEW_WINDOW_MINS_KEY)
def _ensure_value_is_nonnegative_int(self, workflow_dict, key, errors):
"""Checks that workflow_dict[key] is a non-negative integer."""
value = workflow_dict[key]
if not isinstance(value, int):
errors.append('%s should be an integer' % key)
elif value < 0:
errors.append('%s should be a non-negative integer' % key)
def validate(self, errors=None):
"""Tests whether the current Workflow object is valid."""
if errors is None:
errors = []
try:
# Validate the workflow specification (in YAML format).
assert self._yaml_str, 'missing key: %s.' % GRADER_KEY
workflow_dict = yaml.safe_load(self._yaml_str)
assert isinstance(workflow_dict, dict), (
'expected the YAML representation of a dict')
assert GRADER_KEY in workflow_dict, 'missing key: %s.' % GRADER_KEY
assert workflow_dict[GRADER_KEY] in ALLOWED_GRADERS, (
'invalid grader, should be one of: %s' %
', '.join(ALLOWED_GRADERS))
workflow_errors = []
submission_due_date = None
if SUBMISSION_DUE_DATE_KEY in workflow_dict.keys():
try:
submission_due_date = self._convert_date_string_to_datetime(
workflow_dict[SUBMISSION_DUE_DATE_KEY])
except Exception as e: # pylint: disable-msg=broad-except
workflow_errors.append(
'dates should be formatted as YYYY-MM-DD hh:mm '
'(e.g. 1997-07-16 19:20) and be specified in the UTC '
'timezone')
if workflow_errors:
raise Exception('%s.' % '; '.join(workflow_errors))
if workflow_dict[GRADER_KEY] == HUMAN_GRADER:
missing_keys = []
for key in HUMAN_GRADED_ASSESSMENT_KEY_LIST:
if key not in workflow_dict:
missing_keys.append(key)
elif (isinstance(workflow_dict[key], basestring) and not
workflow_dict[key]):
missing_keys.append(key)
assert not missing_keys, (
'missing key(s) for a human-reviewed assessment: %s.' %
', '.join(missing_keys))
if (workflow_dict[MATCHER_KEY] not in
review.ALLOWED_MATCHERS):
workflow_errors.append(
'invalid matcher, should be one of: %s' %
', '.join(review.ALLOWED_MATCHERS))
self._ensure_value_is_nonnegative_int(
workflow_dict, REVIEW_MIN_COUNT_KEY, workflow_errors)
self._ensure_value_is_nonnegative_int(
workflow_dict, REVIEW_WINDOW_MINS_KEY, workflow_errors)
try:
review_due_date = self._convert_date_string_to_datetime(
workflow_dict[REVIEW_DUE_DATE_KEY])
if submission_due_date > review_due_date:
workflow_errors.append(
'submission due date should be earlier than '
'review due date')
except Exception as e: # pylint: disable-msg=broad-except
workflow_errors.append(
'dates should be formatted as YYYY-MM-DD hh:mm '
'(e.g. 1997-07-16 19:20) and be specified in the UTC '
'timezone')
if workflow_errors:
raise Exception('%s.' % '; '.join(workflow_errors))
return True
except Exception as e: # pylint: disable-msg=broad-except
errors.append('Error validating workflow specification: %s' % e)
return False
class Course(object):
"""Manages a course and all of its components."""
# Place for modules to register additional schema fields for setting
# course options. Used in create_common_settings_schema().
#
# This is a dict of lists. The dict key is a string matching a
# sub-registry in the course schema. It is legitimate and expected usage
# to name a sub-schema that's created in create_common_settings_schema(),
# in which case the relevant settings are added to that subsection, and
# will appear with other settings in that subsection in the admin editor
# page.
#
# It is also reasonable to add a new subsection name. If you do that, you
# should also edit the registration of the settings sub-tabs in
# modules.dashboard.dashboard.register_module() to add either a new
# sub-tab, or add your section to an existing sub-tab.
#
# Schema providers are expected to be functions which take one argument:
# the current course. Providers should return exactly one SchemaField
# object, which will be added to the appropriate subsection.
OPTIONS_SCHEMA_PROVIDERS = collections.defaultdict(list)
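# Example (illustrative sketch, not part of this file): a module could
# register an extra course-level setting roughly like this; the field name
# and the schema_fields.SchemaField signature shown here are assumptions.
#
#   def _enable_widget_option(course):
#       return schema_fields.SchemaField(
#           'course:my_module:enable_widget', 'Enable Widget', 'boolean',
#           optional=True)
#
#   Course.OPTIONS_SCHEMA_PROVIDERS[
#       Course.SCHEMA_SECTION_COURSE].append(_enable_widget_option)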
# Holds callback functions which are passed the course object after it is
# loaded, to perform any further processing on loaded course data. An
# instance of the newly created course is passed into each of the hook
# methods in the order they were added to the list.
POST_LOAD_HOOKS = []
# Holds callback functions which are passed the course env dict after it is
# loaded, to perform any further processing on it.
COURSE_ENV_POST_LOAD_HOOKS = []
# Holds callback functions which are passed the course env dict after it is
# saved.
COURSE_ENV_POST_SAVE_HOOKS = []
# Data which is patched onto the course environment - for testing use only.
ENVIRON_TEST_OVERRIDES = {}
SCHEMA_SECTION_COURSE = 'course'
SCHEMA_SECTION_HOMEPAGE = 'homepage'
SCHEMA_SECTION_REGISTRATION = 'registration'
SCHEMA_SECTION_UNITS_AND_LESSONS = 'unit'
SCHEMA_SECTION_ASSESSMENT = 'assessment'
SCHEMA_SECTION_I18N = 'i18n'
# here we keep current course available to thread
INSTANCE = threading.local()
@classmethod
def get_schema_sections(cls):
ret = set([
cls.SCHEMA_SECTION_COURSE,
cls.SCHEMA_SECTION_HOMEPAGE,
cls.SCHEMA_SECTION_REGISTRATION,
cls.SCHEMA_SECTION_UNITS_AND_LESSONS,
cls.SCHEMA_SECTION_ASSESSMENT,
cls.SCHEMA_SECTION_I18N,
])
for name in cls.OPTIONS_SCHEMA_PROVIDERS:
ret.add(name)
return ret
@classmethod
def make_locale_environ_key(cls, locale):
"""Returns key used to store localized settings in memcache."""
return 'course:environ:locale:%s:%s' % (
os.environ.get('CURRENT_VERSION_ID'), locale)
@classmethod
def get_environ(cls, app_context):
"""Returns currently defined course settings as a dictionary."""
# pylint: disable-msg=protected-access
# get from local cache
env = app_context._cached_environ
if env:
return copy.deepcopy(env)
# get from global cache
_locale = app_context.get_current_locale()
_key = cls.make_locale_environ_key(_locale)
env = models.MemcacheManager.get(
_key, namespace=app_context.get_namespace_name())
if env:
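# NOTE: the source is truncated here. A plausible continuation, mirroring the
# local-cache branch above, would store the memcached value and return a deep
# copy (an assumption, not the verbatim original):
#   app_context._cached_environ = env
#   return copy.deepcopy(env)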
#!/usr/bin/env python
""" Blue Gecko BGAPI/BGLib implementation
Changelog:
2020-08-03 - Ported to Blue Gecko (Kris Young)
2017-06-26 - Moved to python3
2013-05-04 - Fixed single-item struct.unpack returns (@zwasson on Github)
2013-04-28 - Fixed numerous uint8array/bd_addr command arg errors
- Added 'debug' support
2013-04-16 - Fixed 'bglib_on_idle' to be 'on_idle'
2013-04-15 - Added wifi BGAPI support in addition to BLE BGAPI
- Fixed references to 'this' instead of 'self'
2013-04-11 - Initial release
============================================
Blue Gecko BGLib Python interface library
2013-05-04 by <NAME> <<EMAIL>>
Updates should (hopefully) always be available at https://github.com/jrowberg/bglib
============================================
BGLib Python interface library code is placed under the MIT license
Copyright (c) 2013 <NAME>
Modifications Copyright (c) 2020 Silicon Laboratories
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
===============================================
Generated on 2020-Aug-03 22:20:09
===============================================
"""
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "2013-05-04"
__email__ = "<EMAIL>"
import struct
# thanks to Masaaki Shibata for Python event handler code
# http://www.emptypage.jp/notes/pyevent.en.html
class BGAPIEvent(object):
def __init__(self, doc=None):
self.__doc__ = doc
def __get__(self, obj, objtype=None):
if obj is None:
return self
return BGAPIEventHandler(self, obj)
def __set__(self, obj, value):
pass
class BGAPIEventHandler(object):
def __init__(self, event, obj):
self.event = event
self.obj = obj
def _getfunctionlist(self):
"""(internal use) """
try:
eventhandler = self.obj.__eventhandler__
except AttributeError:
eventhandler = self.obj.__eventhandler__ = {}
return eventhandler.setdefault(self.event, [])
def add(self, func):
"""Add new event handler function.
Event handler function must be defined like func(sender, earg).
You can add handler also by using '+=' operator.
"""
self._getfunctionlist().append(func)
return self
def remove(self, func):
"""Remove existing event handler function.
You can remove handler also by using '-=' operator.
"""
self._getfunctionlist().remove(func)
return self
def fire(self, earg=None):
"""Fire event and call all handler functions
You can call EventHandler object itself like e(earg) instead of
e.fire(earg).
"""
for func in self._getfunctionlist():
func(self.obj, earg)
__iadd__ = add
__isub__ = remove
__call__ = fire
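# Example (illustrative): how the descriptor-based event pattern above is
# used. Handlers are plain callables taking (sender, earg) and are attached
# with '+='; calling the event fires every attached handler.
#
#   class Radio(object):
#       on_packet = BGAPIEvent()
#
#   def handle_packet(sender, earg):
#       print(earg)
#
#   radio = Radio()
#   radio.on_packet += handle_packet
#   radio.on_packet(earg=b'\x01\x02')   # calls handle_packet(radio, b'\x01\x02')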
class BGLib(object):
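# Each command below packs the 4-byte BGAPI command header (message type
# 0x20, payload length, class id, method id) followed by the payload fields
# in little-endian order, e.g.:
#   gecko_cmd_system_hello() -> b'\x20\x00\x01\x00' (class 1 = system, method 0, empty payload)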
def gecko_cmd_dfu_reset(self, dfu):
return struct.pack('<4BB', 0x20, 1, 0, 0, dfu)
def gecko_cmd_dfu_flash_set_address(self, address):
return struct.pack('<4BI', 0x20, 4, 0, 1, address)
def gecko_cmd_dfu_flash_upload(self, data):
return struct.pack('<4BB' + str(len(data)) + 's', 0x20, 1 + len(data), 0, 2, len(data), bytes(i for i in data))
def gecko_cmd_dfu_flash_upload_finish(self):
return struct.pack('<4B', 0x20, 0, 0, 3)
def gecko_cmd_system_hello(self):
return struct.pack('<4B', 0x20, 0, 1, 0)
def gecko_cmd_system_reset(self, dfu):
return struct.pack('<4BB', 0x20, 1, 1, 1, dfu)
def gecko_cmd_system_get_bt_address(self):
return struct.pack('<4B', 0x20, 0, 1, 3)
def gecko_cmd_system_set_bt_address(self, address):
return struct.pack('<4B6s', 0x20, 6, 1, 4, bytes(i for i in address))
def gecko_cmd_system_set_tx_power(self, power):
return struct.pack('<4Bh', 0x20, 2, 1, 10, power)
def gecko_cmd_system_get_random_data(self, length):
return struct.pack('<4BB', 0x20, 1, 1, 11, length)
def gecko_cmd_system_halt(self, halt):
return struct.pack('<4BB', 0x20, 1, 1, 12, halt)
def gecko_cmd_system_set_device_name(self, type, name):
return struct.pack('<4BBB' + str(len(name)) + 's', 0x20, 2 + len(name), 1, 13, type, len(name), bytes(i for i in name))
def gecko_cmd_system_linklayer_configure(self, key, data):
return struct.pack('<4BBB' + str(len(data)) + 's', 0x20, 2 + len(data), 1, 14, key, len(data), bytes(i for i in data))
def gecko_cmd_system_get_counters(self, reset):
return struct.pack('<4BB', 0x20, 1, 1, 15, reset)
def gecko_cmd_system_data_buffer_write(self, data):
return struct.pack('<4BB' + str(len(data)) + 's', 0x20, 1 + len(data), 1, 18, len(data), bytes(i for i in data))
def gecko_cmd_system_set_identity_address(self, address, type):
return struct.pack('<4B6sB', 0x20, 7, 1, 19, bytes(i for i in address), type)
def gecko_cmd_system_data_buffer_clear(self):
return struct.pack('<4B', 0x20, 0, 1, 20)
def gecko_cmd_le_gap_open(self, address, address_type):
return struct.pack('<4B6sB', 0x20, 7, 3, 0, bytes(i for i in address), address_type)
def gecko_cmd_le_gap_set_mode(self, discover, connect):
return struct.pack('<4BBB', 0x20, 2, 3, 1, discover, connect)
def gecko_cmd_le_gap_discover(self, mode):
return struct.pack('<4BB', 0x20, 1, 3, 2, mode)
def gecko_cmd_le_gap_end_procedure(self):
return struct.pack('<4B', 0x20, 0, 3, 3)
def gecko_cmd_le_gap_set_adv_parameters(self, interval_min, interval_max, channel_map):
return struct.pack('<4BHHB', 0x20, 5, 3, 4, interval_min, interval_max, channel_map)
def gecko_cmd_le_gap_set_conn_parameters(self, min_interval, max_interval, latency, timeout):
return struct.pack('<4BHHHH', 0x20, 8, 3, 5, min_interval, max_interval, latency, timeout)
def gecko_cmd_le_gap_set_scan_parameters(self, scan_interval, scan_window, active):
return struct.pack('<4BHHB', 0x20, 5, 3, 6, scan_interval, scan_window, active)
def gecko_cmd_le_gap_set_adv_data(self, scan_rsp, adv_data):
return struct.pack('<4BBB' + str(len(adv_data)) + 's', 0x20, 2 + len(adv_data), 3, 7, scan_rsp, len(adv_data), bytes(i for i in adv_data))
def gecko_cmd_le_gap_set_adv_timeout(self, maxevents):
return struct.pack('<4BB', 0x20, 1, 3, 8, maxevents)
def gecko_cmd_le_gap_set_conn_phy(self, preferred_phy, accepted_phy):
return struct.pack('<4BBB', 0x20, 2, 3, 9, preferred_phy, accepted_phy)
def gecko_cmd_le_gap_bt5_set_mode(self, handle, discover, connect, maxevents, address_type):
return struct.pack('<4BBBBHB', 0x20, 6, 3, 10, handle, discover, connect, maxevents, address_type)
def gecko_cmd_le_gap_bt5_set_adv_parameters(self, handle, interval_min, interval_max, channel_map, report_scan):
return struct.pack('<4BBHHBB', 0x20, 7, 3, 11, handle, interval_min, interval_max, channel_map, report_scan)
def gecko_cmd_le_gap_bt5_set_adv_data(self, handle, scan_rsp, adv_data):
return struct.pack('<4BBBB' + str(len(adv_data)) + 's', 0x20, 3 + len(adv_data), 3, 12, handle, scan_rsp, len(adv_data), bytes(i for i in adv_data))
def gecko_cmd_le_gap_set_privacy_mode(self, privacy, interval):
return struct.pack('<4BBB', 0x20, 2, 3, 13, privacy, interval)
def gecko_cmd_le_gap_set_advertise_timing(self, handle, interval_min, interval_max, duration, maxevents):
return struct.pack('<4BBIIHB', 0x20, 12, 3, 14, handle, interval_min, interval_max, duration, maxevents)
def gecko_cmd_le_gap_set_advertise_channel_map(self, handle, channel_map):
return struct.pack('<4BBB', 0x20, 2, 3, 15, handle, channel_map)
def gecko_cmd_le_gap_set_advertise_report_scan_request(self, handle, report_scan_req):
return struct.pack('<4BBB', 0x20, 2, 3, 16, handle, report_scan_req)
def gecko_cmd_le_gap_set_advertise_phy(self, handle, primary_phy, secondary_phy):
return struct.pack('<4BBBB', 0x20, 3, 3, 17, handle, primary_phy, secondary_phy)
def gecko_cmd_le_gap_set_advertise_configuration(self, handle, configurations):
return struct.pack('<4BBI', 0x20, 5, 3, 18, handle, configurations)
def gecko_cmd_le_gap_clear_advertise_configuration(self, handle, configurations):
return struct.pack('<4BBI', 0x20, 5, 3, 19, handle, configurations)
def gecko_cmd_le_gap_start_advertising(self, handle, discover, connect):
return struct.pack('<4BBBB', 0x20, 3, 3, 20, handle, discover, connect)
def gecko_cmd_le_gap_stop_advertising(self, handle):
return struct.pack('<4BB', 0x20, 1, 3, 21, handle)
def gecko_cmd_le_gap_set_discovery_timing(self, phys, scan_interval, scan_window):
return struct.pack('<4BBHH', 0x20, 5, 3, 22, phys, scan_interval, scan_window)
def gecko_cmd_le_gap_set_discovery_type(self, phys, scan_type):
return struct.pack('<4BBB', 0x20, 2, 3, 23, phys, scan_type)
def gecko_cmd_le_gap_start_discovery(self, scanning_phy, mode):
return struct.pack('<4BBB', 0x20, 2, 3, 24, scanning_phy, mode)
def gecko_cmd_le_gap_set_data_channel_classification(self, channel_map):
return struct.pack('<4BB' + str(len(channel_map)) + 's', 0x20, 1 + len(channel_map), 3, 25, len(channel_map), bytes(i for i in channel_map))
def gecko_cmd_le_gap_connect(self, address, address_type, initiating_phy):
return struct.pack('<4B6sBB', 0x20, 8, 3, 26, bytes(i for i in address), address_type, initiating_phy)
def gecko_cmd_le_gap_set_advertise_tx_power(self, handle, power):
return struct.pack('<4BBh', 0x20, 3, 3, 27, handle, power)
def gecko_cmd_le_gap_set_discovery_extended_scan_response(self, enable):
return struct.pack('<4BB', 0x20, 1, 3, 28, enable)
def gecko_cmd_le_gap_start_periodic_advertising(self, handle, interval_min, interval_max, flags):
return struct.pack('<4BBHHI', 0x20, 9, 3, 29, handle, interval_min, interval_max, flags)
def gecko_cmd_le_gap_stop_periodic_advertising(self, handle):
return struct.pack('<4BB', 0x20, 1, 3, 31, handle)
def gecko_cmd_le_gap_set_long_advertising_data(self, handle, packet_type):
return struct.pack('<4BBB', 0x20, 2, 3, 32, handle, packet_type)
def gecko_cmd_le_gap_enable_whitelisting(self, enable):
return struct.pack('<4BB', 0x20, 1, 3, 33, enable)
def gecko_cmd_le_gap_set_conn_timing_parameters(self, min_interval, max_interval, latency, timeout, min_ce_length, max_ce_length):
return struct.pack('<4BHHHHHH', 0x20, 12, 3, 34, min_interval, max_interval, latency, timeout, min_ce_length, max_ce_length)
def gecko_cmd_le_gap_set_advertise_random_address(self, handle, addr_type, address):
return struct.pack('<4BBB6s', 0x20, 8, 3, 37, handle, addr_type, bytes(i for i in address))
def gecko_cmd_le_gap_clear_advertise_random_address(self, handle):
return struct.pack('<4BB', 0x20, 1, 3, 38, handle)
def gecko_cmd_sync_open(self, adv_sid, skip, timeout, address, address_type):
return struct.pack('<4BBHH6sB', 0x20, 12, 66, 0, adv_sid, skip, timeout, bytes(i for i in address), address_type)
def gecko_cmd_sync_close(self, sync):
return struct.pack('<4BB', 0x20, 1, 66, 1, sync)
def gecko_cmd_le_connection_set_parameters(self, connection, min_interval, max_interval, latency, timeout):
return struct.pack('<4BBHHHH', 0x20, 9, 8, 0, connection, min_interval, max_interval, latency, timeout)
def gecko_cmd_le_connection_get_rssi(self, connection):
return struct.pack('<4BB', 0x20, 1, 8, 1, connection)
def gecko_cmd_le_connection_disable_slave_latency(self, connection, disable):
return struct.pack('<4BBB', 0x20, 2, 8, 2, connection, disable)
def gecko_cmd_le_connection_set_phy(self, connection, phy):
return struct.pack('<4BBB', 0x20, 2, 8, 3, connection, phy)
def gecko_cmd_le_connection_close(self, connection):
return struct.pack('<4BB', 0x20, 1, 8, 4, connection)
def gecko_cmd_le_connection_set_timing_parameters(self, connection, min_interval, max_interval, latency, timeout, min_ce_length, max_ce_length):
return struct.pack('<4BBHHHHHH', 0x20, 13, 8, 5, connection, min_interval, max_interval, latency, timeout, min_ce_length, max_ce_length)
def gecko_cmd_le_connection_read_channel_map(self, connection):
return struct.pack('<4BB', 0x20, 1, 8, 6, connection)
def gecko_cmd_le_connection_set_preferred_phy(self, connection, preferred_phy, accepted_phy):
return struct.pack('<4BBBB', 0x20, 3, 8, 7, connection, preferred_phy, accepted_phy)
"""Rotations in three dimensions - SO(3).
See :doc:`rotations` for more information.
"""
import warnings
import math
import numpy as np
from numpy.testing import assert_array_almost_equal
unitx = np.array([1.0, 0.0, 0.0])
unity = np.array([0.0, 1.0, 0.0])
unitz = np.array([0.0, 0.0, 1.0])
R_id = np.eye(3)
a_id = np.array([1.0, 0.0, 0.0, 0.0])
q_id = np.array([1.0, 0.0, 0.0, 0.0])
q_i = np.array([0.0, 1.0, 0.0, 0.0])
q_j = np.array([0.0, 0.0, 1.0, 0.0])
q_k = np.array([0.0, 0.0, 0.0, 1.0])
e_xyz_id = np.array([0.0, 0.0, 0.0])
e_zyx_id = np.array([0.0, 0.0, 0.0])
p0 = np.array([0.0, 0.0, 0.0])
eps = 1e-7
def norm_vector(v):
"""Normalize vector.
Parameters
----------
v : array-like, shape (n,)
nd vector
Returns
-------
u : array, shape (n,)
nd unit vector with norm 1 or the zero vector
"""
norm = np.linalg.norm(v)
if norm == 0.0:
return v
else:
return np.asarray(v) / norm
def norm_matrix(R):
"""Normalize rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix with small numerical errors
Returns
-------
R : array, shape (3, 3)
Normalized rotation matrix
"""
R = np.asarray(R)
c2 = R[:, 1]
c3 = norm_vector(R[:, 2])
c1 = norm_vector(np.cross(c2, c3))
c2 = norm_vector(np.cross(c3, c1))
return np.column_stack((c1, c2, c3))
def norm_angle(a):
"""Normalize angle to (-pi, pi].
Parameters
----------
a : float or array-like, shape (n,)
Angle(s) in radians
Returns
-------
a_norm : float or array-like, shape (n,)
Normalized angle(s) in radians
"""
# Source of the solution: http://stackoverflow.com/a/32266181
return -((np.pi - np.asarray(a)) % (2.0 * np.pi) - np.pi)
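# Examples:
#   norm_angle(np.pi)        ->  np.pi        (pi maps to itself)
#   norm_angle(1.5 * np.pi)  -> -0.5 * np.pi
#   norm_angle(2.0 * np.pi)  ->  0.0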
def norm_axis_angle(a):
"""Normalize axis-angle representation.
Parameters
----------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
Returns
-------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle). The length
of the axis vector is 1 and the angle is in [0, pi). No rotation
is represented by [1, 0, 0, 0].
"""
angle = a[3]
norm = np.linalg.norm(a[:3])
if angle == 0.0 or norm == 0.0:
return np.array([1.0, 0.0, 0.0, 0.0])
res = np.empty(4)
res[:3] = a[:3] / norm
angle = norm_angle(angle)
if angle < 0.0:
angle *= -1.0
res[:3] *= -1.0
res[3] = angle
return res
def norm_compact_axis_angle(a):
"""Normalize compact axis-angle representation.
Parameters
----------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z)
Returns
-------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z).
The angle is in [0, pi). No rotation is represented by [0, 0, 0].
"""
angle = np.linalg.norm(a)
if angle == 0.0:
return np.zeros(3)
axis = a / angle
return axis * norm_angle(angle)
def perpendicular_to_vectors(a, b):
"""Compute perpendicular vector to two other vectors.
Parameters
----------
a : array-like, shape (3,)
3d vector
b : array-like, shape (3,)
3d vector
Returns
-------
c : array-like, shape (3,)
3d vector that is orthogonal to a and b
"""
return np.cross(a, b)
def perpendicular_to_vector(a):
"""Compute perpendicular vector to one other vector.
There is an infinite number of solutions to this problem. Thus, we
restrict the solutions to [1, 0, z] and return [0, 0, 1] if the
z component of a is 0.
Parameters
----------
a : array-like, shape (3,)
3d vector
Returns
-------
b : array-like, shape (3,)
A 3d vector that is orthogonal to a. It does not necessarily have
unit length.
"""
if abs(a[2]) < eps:
return np.copy(unitz)
# Now that we solved the problem for [x, y, 0], we can solve it for all
# other vectors by restricting solutions to [1, 0, z] and find z.
# The dot product of orthogonal vectors is 0, thus
# a[0] * 1 + a[1] * 0 + a[2] * z == 0 or -a[0] / a[2] = z
return np.array([1.0, 0.0, -a[0] / a[2]])
def angle_between_vectors(a, b, fast=False):
"""Compute angle between two vectors.
Parameters
----------
a : array-like, shape (n,)
nd vector
b : array-like, shape (n,)
nd vector
fast : bool, optional (default: False)
Use fast implementation instead of numerically stable solution
Returns
-------
angle : float
Angle between a and b
"""
if len(a) != 3 or fast:
return np.arccos(
np.clip(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)),
-1.0, 1.0))
else:
return np.arctan2(np.linalg.norm(np.cross(a, b)), np.dot(a, b))
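# Example: both branches agree for well-conditioned inputs,
#   angle_between_vectors(unitx, unity)            -> pi / 2
#   angle_between_vectors(unitx, unity, fast=True) -> pi / 2
# but the arctan2 form is more accurate for nearly (anti-)parallel vectors,
# where arccos loses precision close to +/-1.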
def vector_projection(a, b):
"""Orthogonal projection of vector a on vector b.
Parameters
----------
a : array-like, shape (3,)
Vector a that will be projected on vector b
b : array-like, shape (3,)
Vector b on which vector a will be projected
Returns
-------
a_on_b : array, shape (3,)
Orthogonal projection of vector a onto vector b
"""
b_norm_squared = np.dot(b, b)
if b_norm_squared == 0.0:
return np.zeros(3)
return np.dot(a, b) * b / b_norm_squared
def random_vector(random_state=np.random.RandomState(0), n=3):
"""Generate an nd vector with normally distributed components.
Each component will be sampled from :math:`\mathcal{N}(\mu=0, \sigma=1)`.
Parameters
----------
random_state : np.random.RandomState, optional (default: random seed 0)
Random number generator
n : int, optional (default: 3)
Number of vector components
Returns
-------
v : array-like, shape (n,)
Random vector
"""
return random_state.randn(n)
def random_axis_angle(random_state=np.random.RandomState(0)):
"""Generate random axis-angle.
The angle will be sampled uniformly from the interval :math:`[0, \pi)`
and each component of the rotation axis will be sampled from
:math:`\mathcal{N}(\mu=0, \sigma=1)` and then the axis will be normalized
to length 1.
Parameters
----------
random_state : np.random.RandomState, optional (default: random seed 0)
Random number generator
Returns
-------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
"""
angle = np.pi * random_state.rand()
a = np.array([0, 0, 0, angle])
a[:3] = norm_vector(random_state.randn(3))
return a
def random_compact_axis_angle(random_state=np.random.RandomState(0)):
"""Generate random compact axis-angle.
The angle will be sampled uniformly from the interval :math:`[0, \pi)`
and each component of the rotation axis will be sampled from
:math:`\mathcal{N}(\mu=0, \sigma=1)` and then the axis will be normalized
to length 1.
Parameters
----------
random_state : np.random.RandomState, optional (default: random seed 0)
Random number generator
Returns
-------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z)
"""
a = random_axis_angle(random_state)
return a[:3] * a[3]
def random_quaternion(random_state=np.random.RandomState(0)):
"""Generate random quaternion.
Parameters
----------
random_state : np.random.RandomState, optional (default: random seed 0)
Random number generator
Returns
-------
q : array-like, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
"""
return norm_vector(random_state.randn(4))
def cross_product_matrix(v):
"""Generate the cross-product matrix of a vector.
The cross-product matrix :math:`\\boldsymbol{V}` satisfies the equation
.. math::
\\boldsymbol{V} \\boldsymbol{w} = \\boldsymbol{v} \\times
\\boldsymbol{w}
It is a skew-symmetric (antisymmetric) matrix, i.e.
:math:`-\\boldsymbol{V} = \\boldsymbol{V}^T`.
Parameters
----------
v : array-like, shape (3,)
3d vector
Returns
-------
V : array-like, shape (3, 3)
Cross-product matrix
"""
return np.array([[0.0, -v[2], v[1]],
[v[2], 0.0, -v[0]],
[-v[1], v[0], 0.0]])
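# Quick check of the defining property V.dot(w) == np.cross(v, w):
#   v = np.array([1.0, 2.0, 3.0]); w = np.array([4.0, 5.0, 6.0])
#   np.allclose(cross_product_matrix(v).dot(w), np.cross(v, w))  # -> True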
def check_skew_symmetric_matrix(V, tolerance=1e-6, strict_check=True):
"""Input validation of a skew-symmetric matrix.
Check whether the transpose of the matrix is its negative:
.. math::
V^T = -V
Parameters
----------
V : array-like, shape (3, 3)
Cross-product matrix
tolerance : float, optional (default: 1e-6)
Tolerance threshold for checks.
strict_check : bool, optional (default: True)
Raise a ValueError if V.T is not numerically close enough to -V.
Otherwise we print a warning.
Returns
-------
V : array-like, shape (3, 3)
Validated cross-product matrix
"""
V = np.asarray(V, dtype=float)
if V.ndim != 2 or V.shape[0] != 3 or V.shape[1] != 3:
raise ValueError("Expected skew-symmetric matrix with shape (3, 3), "
"got array-like object with shape %s" % (V.shape,))
if not np.allclose(V.T, -V, atol=tolerance):
error_msg = ("Expected skew-symmetric matrix, but it failed the test "
"V.T = %r\n-V = %r" % (V.T, -V))
if strict_check:
raise ValueError(error_msg)
else:
warnings.warn(error_msg)
return V
def check_matrix(R, tolerance=1e-6, strict_check=True):
"""Input validation of a rotation matrix.
We check whether R multiplied by its inverse is approximately the identity
matrix and the determinant is approximately 1.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
tolerance : float, optional (default: 1e-6)
Tolerance threshold for checks. Default tolerance is the same as in
assert_rotation_matrix(R).
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
R : array, shape (3, 3)
Validated rotation matrix
"""
# Repository: qmaster0803/telegram_gallery_bot
import config_module
import storage_module
import sqlite3
import json
import os
import glob
from datetime import datetime
def open_connection(db_path=config_module.main_db_path):
try:
db_conn = sqlite3.connect(db_path)
except:
print("Error connecting to database!")
return db_conn
def init_database(): #create tables only if they do not exist, so we can run this on every bot startup
db_conn = open_connection()
cur = db_conn.cursor()
cur.execute('SELECT `name` FROM `sqlite_master` WHERE `type`="table" AND `name`="users";')
if(cur.fetchone() == None):
cur = db_conn.cursor()
cur.executescript('''
BEGIN TRANSACTION;
CREATE TABLE IF NOT EXISTS "galleries" (
"id" INTEGER UNIQUE,
"name" TEXT,
"deleted" INTEGER DEFAULT 0,
"autorotate" INTEGER DEFAULT 0,
PRIMARY KEY("id" AUTOINCREMENT)
);
CREATE TABLE IF NOT EXISTS "processing_queue" (
"id" INTEGER UNIQUE,
"path" TEXT,
"user_id" INTEGER,
"gallery_id" INTEGER,
"action" TEXT,
PRIMARY KEY("id" AUTOINCREMENT)
);
CREATE TABLE IF NOT EXISTS "users" (
"id" INTEGER UNIQUE,
"rights" INTEGER,
"galleries" TEXT DEFAULT '[]',
"current_working_gallery" INTEGER DEFAULT -1,
PRIMARY KEY("id")
);
COMMIT;
''')
print("WARNING! This is first launch of the program. Send /start to bot to register you as admin. Press enter to continue.")
return False
else:
print("Main database initialized.")
return True
#----------------------------------------------------------------------------------------------------
# USERS MODIFYING FUNCTIONS
#----------------------------------------------------------------------------------------------------
def add_user(user_id, rights):
db_conn = open_connection()
cur = db_conn.cursor()
if(rights == '1' or rights == 'admin'): rights_id = 1
else: rights_id = 0
try:
cur.execute('INSERT INTO `users` (`id`,`rights`) VALUES ({}, {});'.format(user_id, rights_id))
except sqlite3.IntegrityError as e:
if(str(e).startswith("UNIQUE constraint failed")):
return 1
db_conn.commit()
db_conn.close()
return 0
def modify_user(user_id, rights):
db_conn = open_connection()
cur = db_conn.cursor()
if(rights == '1' or rights == 'admin'): rights_id = 1
else: rights_id = 0
cur.execute('UPDATE `users` SET `rights` = {} WHERE `id`={};'.format(rights_id, user_id))
if(cur.rowcount == 0):
return 1
db_conn.commit()
#if user was accessing working gallery using admin rights, reset working gallery
if(get_current_working_gallery(user_id) not in get_user_galleries(user_id)):
del cur
cur = db_conn.cursor()
cur.execute('UPDATE `users` SET `current_working_gallery`=-1 WHERE `id`={};'.format(user_id))
db_conn.commit()
db_conn.close()
return 0
def delete_user(user_id):
db_conn = open_connection()
cur = db_conn.cursor()
cur.execute('DELETE FROM `users` WHERE `id`={};'.format(user_id))
if(cur.rowcount == 0):
return 1
db_conn.commit()
db_conn.close()
return 0
def check_user_rights(user_id):
db_conn = open_connection()
cur = db_conn.cursor()
cur.execute('SELECT * FROM `users` WHERE `id`={};'.format(user_id))
row = cur.fetchone()
db_conn.close()
try:
return row[1]
except TypeError:
return -1
def get_user_galleries(user_id):
db_conn = open_connection()
cur = db_conn.cursor()
cur.execute('SELECT `galleries` FROM `users` WHERE `id`={};'.format(user_id))
galleries = cur.fetchone()[0]
db_conn.close()
return json.loads(galleries)
def allow_gallery(user_id, gallery_id):
db_conn = open_connection()
#check user exists
cur = db_conn.cursor()
cur.execute('SELECT * FROM `users` WHERE `id`={};'.format(user_id))
if(cur.fetchone() == None): return 1
#check gallery exists
cur = db_conn.cursor()
cur.execute('SELECT * FROM `galleries` WHERE `id`={} AND `deleted`=0;'.format(gallery_id))
if(cur.fetchone() == None): return 1
#reload json string here
cur = db_conn.cursor()
cur.execute('SELECT `galleries` FROM `users` WHERE `id`={};'.format(user_id))
galleries_list = json.loads(cur.fetchone()[0])
if(int(gallery_id) in galleries_list): return 1
galleries_list.append(int(gallery_id))
del cur
cur = db_conn.cursor()
cur.execute('UPDATE `users` SET `galleries`="{}" WHERE `id`={};'.format(json.dumps(galleries_list), user_id))
db_conn.commit()
db_conn.close()
def deny_gallery(user_id, gallery_id):
db_conn = open_connection()
#check user exists
cur = db_conn.cursor()
cur.execute('SELECT * FROM `users` WHERE `id`={};'.format(user_id))
if(cur.fetchone() == None): return 1
#reload json string here
cur = db_conn.cursor()
cur.execute('SELECT `galleries` FROM `users` WHERE `id`={};'.format(user_id))
galleries_list = json.loads(cur.fetchone()[0])
if(int(gallery_id) not in galleries_list): return 1
galleries_list.remove(int(gallery_id))
del cur
cur = db_conn.cursor()
cur.execute('UPDATE `users` SET `galleries`="{}" WHERE `id`={};'.format(json.dumps(galleries_list), user_id))
db_conn.commit()
#set user's working gallery to none (-1) if the denied gallery was chosen
cur = db_conn.cursor()
cur.execute('UPDATE `users` SET `current_working_gallery`=-1 WHERE `id`={} AND `current_working_gallery`={};'.format(user_id, int(gallery_id)))
db_conn.commit()
db_conn.close()
def select_working_gallery(user_id, gallery_id):
if(gallery_id in get_user_galleries(user_id) or check_user_rights(user_id) == 1):
db_conn = open_connection()
cur = db_conn.cursor()
cur.execute('UPDATE `users` SET `current_working_gallery`={} WHERE `id`={};'.format(gallery_id, user_id))
db_conn.commit()
db_conn.close()
return 0
else: return 1
def get_current_working_gallery(user_id):
db_conn = open_connection()
cur = db_conn.cursor()
cur.execute('SELECT `current_working_gallery` FROM `users` WHERE `id`={};'.format(user_id))
cwg = cur.fetchone()[0]
db_conn.close()
if(int(cwg) == -1): return None
else: return int(cwg)
def check_gallery_not_deleted(gallery_id):
db_conn = open_connection()
cur = db_conn.cursor()
cur.execute('SELECT `deleted` FROM `galleries` WHERE `id`={};'.format(gallery_id))
deleted = cur.fetchone()[0]
db_conn.close()
return deleted == 0
#----------------------------------------------------------------------------------------------------
# GALLERIES MODIFYING FUNCTIONS
#----------------------------------------------------------------------------------------------------
def get_gallery_info(gallery_id):
db_conn = open_connection()
cur = db_conn.cursor()
cur.execute('SELECT * FROM `galleries` WHERE `id`="{}";'.format(gallery_id))
result = cur.fetchone()
db_conn.close()
return result
def create_new_gallery(name):
db_conn = open_connection()
cur = db_conn.cursor()
cur.execute('SELECT * FROM `galleries` WHERE `name`="{}";'.format(name))
if(cur.fetchone() != None): return -1
del cur
cur = db_conn.cursor()
cur.execute('INSERT INTO `galleries` (`name`) VALUES ("{}");'.format(name))
db_conn.commit()
cur = db_conn.cursor()
cur.execute('SELECT `id` FROM `galleries` WHERE `name`="{}";'.format(name))
new_gallery_id = cur.fetchone()[0]
db_conn.close()
return new_gallery_id
def delete_gallery(gallery_id):
db_conn = open_connection()
cur = db_conn.cursor()
cur.execute('UPDATE `galleries` SET `deleted`=1 WHERE `id`={};'.format(gallery_id))
db_conn.commit()
ok = cur.rowcount
#reset all users that have this gallery as working to empty working gallery (-1)
del cur
cur = db_conn.cursor()
cur.execute('UPDATE `users` SET `current_working_gallery`=-1 WHERE `current_working_gallery`={};'.format(gallery_id))
db_conn.commit()
db_conn.close()
return ok
def get_galleries_list(user_id):
db_conn = open_connection()
cur = db_conn.cursor()
cur.execute('SELECT * FROM `galleries`;')
result = []
if(check_user_rights(user_id) != 1): galleries_of_user = get_user_galleries(user_id)
for gallery in cur.fetchall():
if((check_user_rights(user_id) == 1 or gallery[0] in galleries_of_user) and gallery[2] == 0):
result.append("ID {}: {}".format(gallery[0], gallery[1]))
db_conn.close()
if(len(result) == 0): result.append("There are nothing here!")
return result
def add_photo_to_gallery(db_path, date, checksum, size): #returns id of file because of using auto-increment column in db
db_conn = open_connection(db_path=db_path)
cur = db_conn.cursor()
if(date != None): cur.execute('INSERT INTO `photos` (`date`,`checksum`,`size`) VALUES ({}, "{}", {});'.format(date, checksum, size))
else: cur.execute('INSERT INTO `photos` (`date`,`checksum`,`size`) VALUES (NULL, "{}", {});'.format(checksum, size))
db_conn.commit();
#get id of inserted line
cur = db_conn.cursor()
if(date != None): cur.execute('SELECT `id` FROM `photos` WHERE `date`={} AND `checksum`="{}" AND `size`={};'.format(date, checksum, size))
else: cur.execute('SELECT `id` FROM `photos` WHERE `date` IS NULL AND `checksum`="{}" AND `size`={};'.format(checksum, size))
new_line_id = cur.fetchone()[0]
return new_line_id
def modify_photo_checksum(db_path, photo_id, checksum):
db_conn = open_connection(db_path=db_path)
cur = db_conn.cursor()
cur.execute('UPDATE `photos` SET `checksum`="{}" WHERE `id`={};'.format(checksum, photo_id))
db_conn.commit()
db_conn.close()
def check_photo_exists(db_path, size, checksum):
db_conn = open_connection(db_path=db_path)
cur = db_conn.cursor()
cur.execute('SELECT * FROM `photos` WHERE `size`={} AND `checksum`="{}";'.format(size, checksum))
count = len(cur.fetchall())
db_conn.close()
return count != 0
def count_photos_in_month(db_path, year, month):
db_conn = open_connection(db_path=db_path)
#calculate timestamps range
min_timestamp = int(datetime.timestamp(datetime(year=year, month=month, day=1)))
if(month != 12): max_timestamp = int(datetime.timestamp(datetime(year=year, month=month+1, day=1))-1)
else: max_timestamp = int(datetime.timestamp(datetime(year=year+1, month=1, day=1))-1) #01 Jan of next year as max timestamp if checking Dec
#find photos between this timestamps
cur = db_conn.cursor()
cur.execute('SELECT `id` FROM `photos` WHERE `date`>={} AND `date`<={};'.format(min_timestamp, max_timestamp))
count = len(cur.fetchall())
db_conn.close()
return count
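# Example: count_photos_in_month(db_path, 2021, 12) counts photos whose `date`
# falls between 2021-12-01 00:00:00 and 2021-12-31 23:59:59 (local time,
# since datetime.timestamp() is applied to naive datetimes).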
def find_months_in_gallery(db_path):
result = []
db_conn = open_connection(db_path=db_path)
#count entries without date
cur = db_conn.cursor()
cur.execute('SELECT `id` FROM `photos` WHERE `date` IS NULL;')
entries_without_date = len(cur.fetchall())
del cur
#count entries with date
cur = db_conn.cursor()
cur.execute('SELECT `id` FROM `photos` WHERE `date` IS NOT NULL;')
entries_with_date = len(cur.fetchall())
del cur
if(entries_without_date > 0): result.append([None, None, entries_without_date])
if(entries_with_date > 0):
#find first photo date in db
cur = db_conn.cursor()
cur.execute('SELECT `date` FROM `photos` WHERE `date` IS NOT NULL ORDER BY `date` ASC LIMIT 1;')
first_date = cur.fetchone()[0]
first_year = datetime.fromtimestamp(first_date).year
first_month = datetime.fromtimestamp(first_date).month
del cur
#find last photo date in db
cur = db_conn.cursor()
cur.execute('SELECT `date` FROM `photos` WHERE `date` IS NOT NULL ORDER BY `date` DESC LIMIT 1;')
last_date = cur.fetchone()[0]
last_year = datetime.fromtimestamp(last_date).year
last_month = datetime.fromtimestamp(last_date).month
del cur
#iterate through months from first date to last date and check their existence
if(first_year != last_year): #if we need to scan more than part of year
for year in range(first_year, last_year+1):
if(year == first_year): month_range = range(first_month, 12+1) #iterate from first_month to 12
elif(year == last_year): month_range = range(1, last_month+1) #iterate from 1 to last_month
else: month_range = range(1, 12+1) #iterate from 1 to 12
for month in month_range:
count = count_photos_in_month(db_path, year, month)
if(count > 0): result.append([year, month, count])
else:
for month in range(first_month, last_month+1):
count = count_photos_in_month(db_path, first_year, month)
if(count > 0): result.append([first_year, month, count])
if(len(result) > 0): return result
else: return None
def select_all_photos_of_month(db_path, gallery_id, year, month, use_thumbs=False):
db_conn = open_connection(db_path=db_path)
cur = db_conn.cursor()
if(year != 0 and month != 0): #photos without exif marked as 0/0
#calculate timestamps range
min_timestamp = int(datetime.timestamp(datetime(year=year, month=month, day=1)))
if(month != 12): max_timestamp = int(datetime.timestamp(datetime(year=year, month=month+1, day=1))-1)
else: max_timestamp = int(datetime.timestamp(datetime(year=year+1, month=1, day=1))-1) #01 Jan of next year as max timestamp if checking Dec
#find all photos between this timestamps
cur.execute('SELECT `id` FROM `photos` WHERE `date`>={} AND `date`<={};'.format(min_timestamp, max_timestamp))
else:
cur.execute('SELECT `id` FROM `photos` WHERE `date` IS NULL;')
photos = cur.fetchall()
result_paths = []
for photo_id in photos:
#there are no file extensions stored in the database, so we can guess the extension because filenames are unique
if(use_thumbs): mask = os.path.join(config_module.library_path, str(gallery_id), str(photo_id[0])+"_thumb.*")
else: mask = os.path.join(config_module.library_path, str(gallery_id), str(photo_id[0])+".*")
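# NOTE: the source is truncated here. A minimal sketch of the remaining
# lookup, based on the glob import and the result_paths list above (an
# assumption, not the verbatim original):
found = glob.glob(mask)
if(len(found) > 0): result_paths.append(found[0])
db_conn.close()
return result_paths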
import requests
import time
import sqlalchemy
import yaml
import os
import pandas as pd
import numpy as np
import logging
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from concurrent.futures import ThreadPoolExecutor, as_completed
from sqlalchemy.types import VARCHAR
from sqlalchemy.types import DateTime
from sqlalchemy import MetaData,Column, Table
from sqlalchemy.orm import sessionmaker, mapper
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import Index
from threading import Thread
from sqlalchemy.exc import ArgumentError
from sqlalchemy.sql import case
import sqlalchemy
import re
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from askdata.askdata_client import Agent
import sys
from datetime import datetime
_LOG_FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] - %(asctime)s --> %(message)s"
g_logger = logging.getLogger()
logging.basicConfig(format=_LOG_FORMAT)
g_logger.setLevel(logging.INFO)
root_dir = os.path.abspath(os.path.dirname(__file__))
# retrieving base url
yaml_path = os.path.join(root_dir, '../askdata/askdata_config/base_url.yaml')
with open(yaml_path, 'r') as file:
# The FullLoader parameter handles the conversion from YAML
# scalar values to the Python dictionary format
url_list = yaml.load(file, Loader=yaml.FullLoader)
class Dataset():
'''
Dataset Object
'''
_agentId = None
_domain = None
def __init__(self, env, token):
self._headers = {
"Content-Type": "application/json",
"Authorization": "Bearer" + " " + token
}
if env == 'dev':
self._base_url_dataset = url_list['BASE_URL_DATASET_DEV']
self._base_url_askdata = url_list['BASE_URL_ASKDATA_DEV']
if env == 'qa':
self._base_url_dataset = url_list['BASE_URL_DATASET_QA']
self._base_url_askdata = url_list['BASE_URL_ASKDATA_QA']
if env == 'prod':
self._base_url_dataset = url_list['BASE_URL_DATASET_PROD']
self._base_url_askdata = url_list['BASE_URL_ASKDATA_PROD']
def _get_info_dataset_by_slug(self, slug: str):
list_datasets = self.list_datasets()
dataset = list_datasets[list_datasets['slug'] == slug]
self._dataset_type = dataset.iloc[0]['type']
self._dataset_id = dataset.iloc[0]['id']
self._dataset_code = dataset.iloc[0]['code']
self._dataset_name = dataset.iloc[0]['name']
self._dataset_slug = dataset.iloc[0]['slug']
self._dataset_icon = dataset.iloc[0]['icon']
self._dataset_createdby = dataset.iloc[0]['createdBy']
def list_datasets(self):
s = requests.Session()
s.keep_alive = False
retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
s.mount('https://', HTTPAdapter(max_retries=retries))
dataset_url = self._base_url_dataset + '/datasets?agentId=' + self._agentId
response = s.get(url=dataset_url, headers=self._headers)
response.raise_for_status()
r = response.json()
r_df = pd.DataFrame(r)
try:
if r_df.empty:
raise Exception('No datasets in the agent {}'.format(self._agentId))
else:
datasets_df = r_df.loc[:, ['id', 'domain', 'type', 'code', 'name', 'slug', 'description', 'createdBy', 'isActive',
'accessType', 'icon', 'version', 'syncCount', 'visible', 'public', 'createdAt']]
except Exception as e:
datasets_df = r_df
logging.info(e)
return datasets_df
def get_id_dataset_by_name(self, name_ds: str, exact=False):
'''
Get askdata dataset ids by name
:param name_ds: String
the name to search for
:param exact: Boolean
if True, the method searches for dataset ids with an exact name match; if False,
it returns dataset ids whose names contain name_ds
:return: Array
'''
dataset_list = self.list_datasets()
if not exact:
dataset_select_name = dataset_list.name.str.contains(name_ds, flags=re.IGNORECASE, regex=True)
dataset_choose = dataset_list[dataset_select_name]
else:
dataset_choose = dataset_list[dataset_list['name'] == name_ds]
if dataset_choose.empty:
raise Exception('No datasets {} in the agent'.format(name_ds))
#return an array
return dataset_choose.id.values
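# Example (illustrative; `ds` is an instantiated Dataset/agent object):
#   ds.get_id_dataset_by_name('sales')                    # ids of datasets whose name contains "sales"
#   ds.get_id_dataset_by_name('sales_2020', exact=True)   # ids of datasets named exactly "sales_2020"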
def get_dataset_id(self)->str:
"""
get dataset id from the dataset instantiated with slug
:return: dataset_id: str
"""
if hasattr(self,'_dataset_id'):
return self._dataset_id
else:
raise Exception("dataset didn't instantiate with slug")
# TODO: Remove after verifying that load_datset_to_df works, since we can reuse __get_dataset_settings_info to get the same return information
# def __get_dataset_connection(self, datasetid):
#
# s = requests.Session()
# s.keep_alive = False
# retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
# s.mount('https://', HTTPAdapter(max_retries=retries))
#
# dataset_url = self._base_url_dataset + '/datasets?agentId=' + self._agentId
# response = requests.get(url=dataset_url, headers=self._headers)
# response.raise_for_status()
# r = response.json()
# connection_df = pd.DataFrame([row['connection'] for row in r if row['id'] == datasetid])
# id_createdby = [row['createdBy'] for row in r if row['id'] == datasetid][0]
# return connection_df.table_id.item(), connection_df.schema.item(), id_createdby
def load_entities_dataset(self, datasetid, select_custom=True):
df_datasets = self.list_datasets()
dataset_info = df_datasets[df_datasets['id'] == datasetid]
with requests.Session() as s:
retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
s.mount('https://', HTTPAdapter(max_retries=retries))
authentication_url = '{}/smartbot/dataset/type/{}/id/{}/subset/{}?_page=0&_limit=1000'.format(
self._base_url_askdata,dataset_info.type.item(),dataset_info.id.item(),dataset_info.type.item())
r = s.get(url=authentication_url, headers=self._headers)
r.raise_for_status()
# get all entity
entities_df = pd.DataFrame(r.json()['payload']['data'])
# copy entity not custom
entities_df_no_cust = entities_df[entities_df['custom'] == False].copy()
index_nocust = entities_df_no_cust.index
# select columnid only with custom = false
columnsid = [row['columnId'] for row in entities_df.loc[index_nocust,'schemaMetaData']]
entitie_code = entities_df.code.tolist()
#select code of entities with custom = False
if select_custom == False:
entitie_code = entities_df.loc[index_nocust, 'code'].tolist()
return entitie_code, columnsid
def execute_dataset_sync(self, dataset_id=''):
if dataset_id != '':
pass
elif hasattr(self, '_dataset_slug') != '' and dataset_id == '':
dataset_id = self._dataset_id
logging.info("---- sync dataset with id '{}' ----- ".format(str(self._dataset_id)))
else:
raise Exception("takes 2 positional arguments but dataset_id weren't given or dataset didn't"
" instantiate with slug")
dataset_url = self._base_url_askdata + '/smartdataset/datasets/' + dataset_id + '/sync'
r = requests.post(url=dataset_url, headers=self._headers)
r.raise_for_status()
return r
def __ask_db_engine(self, dataset_id: str, setting: dict):
# request credential
s = requests.Session()
s.keep_alive = False
retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
s.mount('https://', HTTPAdapter(max_retries=retries))
authentication_url = self._base_url_askdata + '/smartdataset/datasets/' + dataset_id + '/onetimecredentials'
logging.info("AUTH URL {}".format(authentication_url))
response = s.get(url=authentication_url, headers=self._headers)
response.raise_for_status()
r = response.json()
logging.info("RESPONSE {}".format(r))
host = setting['datasourceUrl'].split('/')[2].split(':')[0]
port = setting['datasourceUrl'].split('/')[2].split(':')[1]
database_engine = sqlalchemy.create_engine('mysql+mysqlconnector://{0}:{1}@{2}:{3}/{4}'.
format(r['mysqlUsername'], r['mysqlPassword'], host, port,
setting['schema']), pool_recycle=3600, pool_size=5)
db_tablename = r['mysqlTable']
return database_engine, db_tablename
def __ask_del_db_engine(self, dataset_id):
s = requests.Session()
s.keep_alive = False
retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
s.mount('https://', HTTPAdapter(max_retries=retries))
authentication_url = self._base_url_askdata + '/smartdataset/datasets/' + dataset_id + '/revokeonetimecredentials'
# dataset_url = 'https://smartsql-dev.askdata.com/custom/create'
response = s.delete(url=authentication_url, headers=self._headers)
response.raise_for_status()
logging.debug('---------------------------')
logging.debug('-------delete mysqluser for dataset {}------'.format(dataset_id))
def create_or_update_dataset(self, frame: pd.DataFrame, dataset_id:str, dataset_name="", add_indexdf = False,
indexclm = [], unique_key=[]) -> str:
# TODO: see upsert in mysql if_exists['replace','Append','upsert']
# TODO: insert unique_key
'''
Save the data frame in askdata dataset of the specific agent
Parameters
----------
frame : DataFrame
Input dataframe
dataset_id : string
id of the Askdata dataset to update
dataset_name : string
name of the Askdata dataset
indexclm : list of string
column names of the data frame which are set as indexes for better performance.
Default: empty list
'''
#dataset_id = self.get_id_dataset_by_name(dataset_name)[0]
settings_dataset = self.__get_dataset_settings_info(dataset_id, True)["settings"]
logging.info("SETTINGS DATASET {}".format(settings_dataset))
engine, db_tablename = self.__ask_db_engine(dataset_id, settings_dataset)
logging.info("ENGINE {}\n TABLENAME {}".format(engine, db_tablename))
# with "with" we can close the connetion when we exit
with engine.connect() as connection:
# check the column types of the DataFrame to create a correct and performant table structure
dtype_table = dict()
for clm in frame.select_dtypes(include=object).columns:
maxLen = frame[clm].str.len().max()
dtype_table[clm] = VARCHAR(length=maxLen + 10)
for clm in frame.select_dtypes(include=[np.datetime64]).columns:
dtype_table[clm] = DateTime()
if not indexclm:
frame.to_sql(con=connection, name=db_tablename, if_exists='replace', chunksize=1000, index=add_indexdf,
index_label='INDEX_DF',
method='multi',dtype=dtype_table)
else:
frame.to_sql(con=connection, name=db_tablename, if_exists='replace', chunksize=1000,
method='multi',index=add_indexdf, index_label='INDEX_DF', dtype=dtype_table)
# SQL Statement to create a secondary index
for column_ind in indexclm:
sql_index = """CREATE INDEX index_{}_{} ON {}(`{}`);""".format(db_tablename, column_ind,
db_tablename, column_ind)
# Execute the sql - create index
connection.execute(sql_index)
# Now list the indexes on the table
sql_show_index = "show index from {}".format(db_tablename)
indices_mysql = connection.execute(sql_show_index)
for index_mysql in indices_mysql.fetchall():
logging.info('--- ----------- -----')
logging.info('--- add index: {}'.format(index_mysql[2]))
logging.info('--- ----------- -----')
logging.info('--- Save the Dataframe into Dataset {}'.format(dataset_name))
#run sync dataset
self.execute_dataset_sync(dataset_id)
# delete mysql user
self.__ask_del_db_engine(dataset_id)
# find list dataset
list_dataset = self.list_datasets()
slug = list_dataset[list_dataset['id'] == dataset_id].loc[:,'slug'].item()
return slug
def create_dataset(self, frame: pd.DataFrame, dataset_name: str, add_indexdf = False,
indexclm = [], unique_key=[]) -> str:
# TODO: see upsert in mysql if_exists['replace','Append','upsert']
# TODO: insert unique_key
'''
Save the data frame in askdata dataset of the specific agent
Parameters
----------
frame : DataFrame
Input dataframe
dataset_name : string
name of the Askdata dataset to create
indexclm : list of string
column names of the data frame which are set as indexes for better performance.
Default: empty list
'''
dataset_id, settings_dataset = self.__create_dataset_df(dataset_name)
engine, db_tablename = self.__ask_db_engine(dataset_id, settings_dataset)
# with "with" we can close the connetion when we exit
with engine.connect() as connection:
# check the column types of the DataFrame to create a correct and performant table structure
dtype_table = dict()
for clm in frame.select_dtypes(include=object).columns:
maxLen = frame[clm].str.len().max()
dtype_table[clm] = VARCHAR(length=maxLen + 10)
for clm in frame.select_dtypes(include=[np.datetime64]).columns:
dtype_table[clm] = DateTime()
if not indexclm:
frame.to_sql(con=connection, name=db_tablename, if_exists='replace', chunksize=1000, index=add_indexdf,
index_label='INDEX_DF',
method='multi',dtype=dtype_table)
else:
frame.to_sql(con=connection, name=db_tablename, if_exists='replace', chunksize=1000,
method='multi',index=add_indexdf, index_label='INDEX_DF', dtype=dtype_table)
# SQL Statement to create a secondary index
for column_ind in indexclm:
sql_index = """CREATE INDEX index_{}_{} ON {}(`{}`);""".format(db_tablename, column_ind,
db_tablename, column_ind)
# Execute the sql - create index
connection.execute(sql_index)
# Now list the indexes on the table
sql_show_index = "show index from {}".format(db_tablename)
indices_mysql = connection.execute(sql_show_index)
for index_mysql in indices_mysql.fetchall():
logging.info('--- ----------- -----')
logging.info('--- add index: {}'.format(index_mysql[2]))
logging.info('--- ----------- -----')
logging.info('--- Save the Dataframe into Dataset {}'.format(dataset_name))
#run sync dataset
self.execute_dataset_sync(dataset_id)
# delete mysql user
self.__ask_del_db_engine(dataset_id)
# find list dataset
list_dataset = self.list_datasets()
slug = list_dataset[list_dataset['id'] == dataset_id].loc[:,'slug'].item()
return slug
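# Example (illustrative sketch; `ds`, the column names and the dataset name
# are assumptions):
#   df = pd.DataFrame({'country': ['IT', 'FR'], 'revenue': [10.5, 20.0]})
#   slug = ds.create_dataset(df, 'Revenue by country', indexclm=['country'])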
# def update_dataset(self, frame, tablename, if_exists='rename'):
# to do
# pass
def load_dataset(self, dataset_id='')-> pd.DataFrame:
'''
"""
A sas7bdat reader. File format taken from
https://github.com/BioStatMatt/sas7bdat/blob/master/inst/doc/sas7bdat.rst
"""
import os
import sys
import struct
import locale
import logging
import platform
from cStringIO import StringIO
from datetime import datetime, timedelta
from collections import namedtuple
from pandas import DataFrame
__all__ = ['SAS7BDAT']
def _debug(t, v, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
sys.__excepthook__(t, v, tb)
else:
import pdb
import traceback
traceback.print_exception(t, v, tb)
print
pdb.pm()
os._exit(1)
def _getColorEmit(fn):
# This doesn't work on Windows since Windows doesn't support
# the ansi escape characters
def _new(handler):
levelno = handler.levelno
if levelno >= logging.CRITICAL:
color = '\x1b[31m' # red
elif levelno >= logging.ERROR:
color = '\x1b[31m' # red
elif levelno >= logging.WARNING:
color = '\x1b[33m' # yellow
elif levelno >= logging.INFO:
color = '\x1b[32m' # green or normal
elif levelno >= logging.DEBUG:
color = '\x1b[35m' # pink
else:
color = '\x1b[0m' # normal
handler.msg = color + handler.msg + '\x1b[0m' # normal
return fn(handler)
return _new
class SAS7BDAT(object):
MAGIC = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\xea\x81\x60"\
"\xb3\x14\x11\xcf\xbd\x92\x08\x00\x09\xc7\x31\x8c\x18\x1f\x10\x11"
# Host systems known to work
KNOWNHOSTS = set(["WIN_PRO", "WIN_NT", "WIN_NTSV", "WIN_SRV", "WIN_ASRV",
"XP_PRO", "XP_HOME", "NET_ASRV", "NET_DSRV", "NET_SRV",
"WIN_98", "W32_VSPRO", "WIN", "WIN_95", "X64_VSPRO",
"AIX", "X64_ESRV", "W32_ESRV", "W32_7PRO", "W32_VSHOME",
"X64_7HOME", "X64_7PRO", "X64_SRV0", "W32_SRV0",
"X64_ES08", "Linux", "HP-UX"])
# Subheader signatures, 32 and 64 bit, little and big endian
SUBH_ROWSIZE = set(["\xF7\xF7\xF7\xF7", "\x00\x00\x00\x00\xF7\xF7\xF7\xF7",
"\xF7\xF7\xF7\xF7\x00\x00\x00\x00"])
SUBH_COLSIZE = set(["\xF6\xF6\xF6\xF6", "\x00\x00\x00\x00\xF6\xF6\xF6\xF6",
"\xF6\xF6\xF6\xF6\x00\x00\x00\x00"])
SUBH_COLTEXT = set(["\xFD\xFF\xFF\xFF", "\xFF\xFF\xFF\xFD",
"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF",
"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD"])
SUBH_COLATTR = set(["\xFC\xFF\xFF\xFF", "\xFF\xFF\xFF\xFC",
"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF",
"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC"])
SUBH_COLNAME = set(["\xFF\xFF\xFF\xFF",
"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"])
SUBH_COLLABS = set(["\xFE\xFB\xFF\xFF", "\xFF\xFF\xFB\xFE",
"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF",
"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE"])
SUBH_COLLIST = set(["\xFE\xFF\xFF\xFF", "\xFF\xFF\xFF\xFE",
"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF",
"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE"])
SUBH_SUBHCNT = set(["\x00\xFC\xFF\xFF", "\xFF\xFF\xFC\x00",
"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF",
"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00"])
# Page types
PAGE_META = 0
PAGE_DATA = 256 # 1 << 8
PAGE_MIX = [512, 640] # 1 << 9, 1 << 9 | 1 << 7
PAGE_AMD = 1024 # 1 << 10
PAGE_METC = 16384 # 1 << 14 (compressed data)
PAGE_COMP = -28672 # ~(1 << 14 | 1 << 13 | 1 << 12)
PAGE_MIX_DATA = PAGE_MIX + [PAGE_DATA]
PAGE_META_MIX_AMD = [PAGE_META] + PAGE_MIX + [PAGE_AMD]
PAGE_ANY = PAGE_META_MIX_AMD + [PAGE_DATA, PAGE_METC, PAGE_COMP]
def __init__(self, path, logLevel=logging.INFO, formatters=None):
if logLevel == logging.DEBUG:
sys.excepthook = _debug
self.path = path
self.logger = self._makeLogger(level=logLevel)
self.header = self._readHeader()
self.logger.debug(str(self.header))
self.formatters = formatters or {}
def _makeLogger(self, level=logging.INFO):
"""
Create a custom logger with the specified properties.
"""
logger = logging.getLogger(self.path)
logger.setLevel(level)
formatter = logging.Formatter("%(message)s", "%y-%m-%d %H:%M:%S")
streamHandler = logging.StreamHandler()
if platform.system() != 'Windows':
streamHandler.emit = _getColorEmit(streamHandler.emit)
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
return logger
def checkMagicNumber(self, header):
return header[:len(self.MAGIC)] == self.MAGIC
def readVal(self, fmt, h, start, size):
newfmt = fmt
if fmt == 's':
newfmt = '%ds' % size
elif fmt == 'numeric':
newfmt = 'd'
if size < 8:
if self.endian == 'little':
h = '\x00' * (8 - size) + h
else:
h += '\x00' * (8 - size)
size = 8
if self.endian == 'big':
newfmt = '>%s' % newfmt
else:
newfmt = '<%s' % newfmt
val = struct.unpack(newfmt, h[start:start + size])[0]
if fmt == 's':
val = val.strip('\x00')
return val
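# For example, readVal('h', page, 16, 2) unpacks the 2-byte signed integer at
# offset 16 of `page`, honouring self.endian ('<h' little-endian, '>h' big-endian).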
def readColumnAttributes(self, colattr):
info = []
Info = namedtuple('ColumnAttributes', ['offset', 'length', 'type'])
inc = 16 if self.u64 else 12
for subh in colattr:
if self.u64:
attrs = subh.raw[16:16 + ((subh.length - 28) / 16) * 16]
else:
attrs = subh.raw[12:12 + ((subh.length - 20) / 12) * 12]
for i in xrange(0, len(attrs), inc):
pointer = attrs[i:i + inc]
if self.u64:
offset = self.readVal('q', pointer, 0, 8)
length = self.readVal('i', pointer, 8, 4)
ctype = self.readVal('b', pointer, 14, 1)
else:
offset = self.readVal('i', pointer, 0, 4)
length = self.readVal('i', pointer, 4, 4)
ctype = self.readVal('b', pointer, 10, 1)
assert ctype in (1, 2)
ctype = 'numeric' if ctype == 1 else 'character'
info.append(Info(offset, length, ctype))
return info
def readColumnNames(self, colname, coltext):
info = []
inc = 8 if self.u64 else 4
for subh in colname:
if self.u64:
attrs = subh.raw[16:16 + ((subh.length - 28) / 8) * 8]
else:
attrs = subh.raw[12:12 + ((subh.length - 20) / 8) * 8]
for i in xrange(0, len(attrs), 8):
pointer = attrs[i:i + 8]
txt = self.readVal('h', pointer, 0, 2)
offset = self.readVal('h', pointer, 2, 2) + inc
length = self.readVal('h', pointer, 4, 2)
info.append(
self.readVal('s', coltext[txt].raw, offset, length)
)
return info
def readColumnLabels(self, collabs, coltext, colcount):
Info = namedtuple('ColumnLabels', ['format', 'label'])
if len(collabs) < 1:
return [Info('', '')] * colcount
info = []
inc = 8 if self.u64 else 4
for subh in collabs:
base = 46 if self.u64 else 34
txt = self.readVal('h', subh.raw, base, 2)
offset = self.readVal('h', subh.raw, base + 2, 2) + inc
length = self.readVal('h', subh.raw, base + 4, 2)
fmt = ''
if length > 0:
fmt = self.readVal('s', coltext[txt].raw, offset, length)
base = 52 if self.u64 else 40
txt = self.readVal('h', subh.raw, base, 2)
offset = self.readVal('h', subh.raw, base + 2, 2) + inc
length = self.readVal('h', subh.raw, base + 4, 2)
label = ''
if length > 0:
label = self.readVal('s', coltext[txt].raw, offset, length)
info.append(Info(fmt, label))
return info or [Info('', '')] * colcount
def readPages(self, f, pagecount, pagesize):
# Read pages
Page = namedtuple('Page', ['number', 'data', 'type', 'blockcount',
'subheadercount'])
for i in xrange(pagecount):
page = f.read(pagesize)
ptype = self.readVal('h', page, 32 if self.u64 else 16, 2)
blockcount = 0
subhcount = 0
if ptype in self.PAGE_META_MIX_AMD:
blockcount = self.readVal('h', page, 34 if self.u64 else 18, 2)
subhcount = self.readVal('h', page, 36 if self.u64 else 20, 2)
yield Page(i, page, ptype, blockcount, subhcount)
def readSubheaders(self, f, pagecount, pagesize):
SubHeader = namedtuple('SubHeader', ['page', 'offset', 'length', 'raw',
'signature', 'compression'])
oshp = 40 if self.u64 else 24
lshp = 24 if self.u64 else 12
lshf = 8 if self.u64 else 4
dtype = 'q' if self.u64 else 'i'
for page in self.readPages(f, pagecount, pagesize):
if page.type not in self.PAGE_META_MIX_AMD:
continue
pointers = page.data[oshp:oshp + (page.subheadercount * lshp)]
for i in xrange(0, len(pointers), lshp):
pointer = pointers[i:i + lshp]
offset = self.readVal(dtype, pointer, 0, lshf)
length = self.readVal(dtype, pointer, lshf, lshf)
comp = self.readVal('b', pointer, lshf * 2, 1)
if length > 0:
raw = page.data[offset:offset + length]
signature = raw[:8 if self.u64 else 4]
if comp == 0:
comp = None
elif comp == 1:
comp = 'ignore'
elif comp == 4:
comp = 'rle'
else:
self.logger.error('[%s] unknown compression type: %d',
os.path.basename(self.path), comp)
yield SubHeader(page.number, offset, length,
raw, signature, comp)
def _readHeader(self):
fields = ['headerlength', 'endian', 'platform', 'datecreated',
'dataset', 'datemodified', 'pagesize', 'pagecount',
'sasrelease', 'sashost', 'osversion', 'osmaker', 'osname',
'u64', 'rowcount', 'colcount', 'cols', 'rowcountfp',
'rowlength', 'filename', 'compression', 'creator',
'creatorproc']
Info = namedtuple('SAS7BDAT_Header', fields)
def _repr(self):
cols = [['Num', 'Name', 'Type', 'Length', 'Format', 'Label']]
align = ['>', '<', '<', '>', '<', '<']
colwidth = [len(x) for x in cols[0]]
for i, col in enumerate(self.cols, 1):
tmp = [i, col.name, col.attr.type, col.attr.length,
col.label.format, col.label.label]
cols.append(tmp)
for j, val in enumerate(tmp):
colwidth[j] = max(colwidth[j], len(str(val)))
rows = [' '.join('{0:{1}}'.format(x, colwidth[i])
for i, x in enumerate(cols[0]))]
rows.append(' '.join('-' * colwidth[i]
for i in xrange(len(align))))
for row in cols[1:]:
rows.append(' '.join(
'{0:{1}{2}}'.format(x, align[i], colwidth[i])
for i, x in enumerate(row))
)
cols = '\n'.join(rows)
hdr = 'Header:\n%s' % '\n'.join(
['\t%s: %s' % (k, v)
for k, v in sorted(self._asdict().iteritems())
if v != '' and k not in ('cols', 'rowcountfp', 'rowlength',
'data')]
)
return '%s\n\nContents of dataset "%s":\n%s\n' % (
hdr, self.dataset, cols
)
Info.__repr__ = _repr
Column = namedtuple('Column', ['name', 'attr', 'label'])
with open(self.path, 'rb') as f:
# Check magic number
h = f.read(288)
if len(h) < 288:
self.logger.error("[%s] header too short (not a sas7bdat "
"file?)", os.path.basename(self.path))
return
if not self.checkMagicNumber(h):
self.logger.error("[%s] magic number mismatch",
os.path.basename(self.path))
return
# Check for 32 or 64 bit alignment
if h[32] == '\x33':
align2 = 4
u64 = True
else:
align2 = 0
u64 = False
if h[35] == '\x33':
align1 = 4
else:
align1 = 0
# Check endian
if h[37] == '\x01':
endian = 'little'
else:
endian = 'big'
# Check platform
plat = h[39]
if plat == '1':
plat = 'unix'
elif plat == '2':
plat = 'windows'
else:
plat = 'unknown'
'''
Created on 15 Sep 2016
Last modified on 26 Sep 2019
@author: romuere, <NAME>
'''
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pycbir.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog, QMessageBox, QLabel, QComboBox
from PyQt5.QtGui import QIcon, QPixmap
import numpy as np
import glob
import csv
import os
class Ui_pyCBIR(object):
def setupUi(self, pyCBIR):
pyCBIR.setObjectName("pyCBIR")
pyCBIR.resize(946, 730)
self.centralwidget = QtWidgets.QWidget(pyCBIR)
self.centralwidget.setObjectName("centralwidget")
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setGeometry(QtCore.QRect(3, 0, 131, 141))
self.groupBox.setObjectName("groupBox")
self.radioButton = QtWidgets.QRadioButton(self.groupBox)
self.radioButton.setGeometry(QtCore.QRect(3, 20, 101, 20))
self.radioButton.setObjectName("radioButton")
self.radioButton_2 = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_2.setGeometry(QtCore.QRect(3, 40, 97, 18))
self.radioButton_2.setObjectName("radioButton_2")
self.radioButton_3 = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_3.setGeometry(QtCore.QRect(3, 60, 141, 18))
self.radioButton_3.setObjectName("radioButton_3")
self.radioButton_3.setChecked(True)
self.radioButton_4 = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_4.setGeometry(QtCore.QRect(3, 80, 97, 18))
self.radioButton_4.setObjectName("radioButton_4")
self.radioButton_5 = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_5.setGeometry(QtCore.QRect(3, 100, 97, 18))
self.radioButton_5.setObjectName("radioButton_5")
#self.radioButton_5.setChecked(True)
self.radioButton_6 = QtWidgets.QRadioButton(self.groupBox)
self.radioButton_6.setGeometry(QtCore.QRect(3, 120, 97, 18))
self.radioButton_6.setObjectName("radioButton_6")
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_2.setGeometry(QtCore.QRect(3, 140, 131, 88))
self.groupBox_2.setObjectName("groupBox_2")
self.radioButton_7 = QtWidgets.QRadioButton(self.groupBox_2)
self.radioButton_7.setGeometry(QtCore.QRect(3, 20, 101, 20))
self.radioButton_7.setObjectName("radioButton_7")
self.radioButton_7.setChecked(True)
self.radioButton_8 = QtWidgets.QRadioButton(self.groupBox_2)
self.radioButton_8.setGeometry(QtCore.QRect(3, 40, 97, 20))
self.radioButton_8.setObjectName("radioButton_8")
self.radioButton_9 = QtWidgets.QRadioButton(self.groupBox_2)
self.radioButton_9.setGeometry(QtCore.QRect(3, 60, 97, 20))
self.radioButton_9.setObjectName("radioButton_9")
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_3.setGeometry(QtCore.QRect(3, 230, 131, 471))
self.groupBox_3.setObjectName("groupBox_3")
self.label = QtWidgets.QLabel(self.groupBox_3)
self.label.setGeometry(QtCore.QRect(3, 20, 111, 16))
self.label.setObjectName("label")
self.lineEdit = QtWidgets.QLineEdit(self.groupBox_3)
self.lineEdit.setGeometry(QtCore.QRect(42, 40, 50, 21))
self.lineEdit.setObjectName("lineEdit")
self.groupBox_4 = QtWidgets.QGroupBox(self.groupBox_3)
self.groupBox_4.setGeometry(QtCore.QRect(3, 130, 131, 141))
self.groupBox_4.setObjectName("groupBox_4")
self.lineEdit_3 = QtWidgets.QLineEdit(self.groupBox_4)
self.lineEdit_3.setGeometry(QtCore.QRect(0, 50, 131, 21))
self.lineEdit_3.setObjectName("lineEdit_3")
self.pushButton_3 = QtWidgets.QPushButton(self.groupBox_4)
self.pushButton_3.setGeometry(QtCore.QRect(0, 20, 130, 32))
self.pushButton_3.setObjectName("pushButton_3")
self.lineEdit_4 = QtWidgets.QLineEdit(self.groupBox_4)
self.lineEdit_4.setGeometry(QtCore.QRect(0, 110, 131, 21))
self.lineEdit_4.setObjectName("lineEdit_4")
self.pushButton_4 = QtWidgets.QPushButton(self.groupBox_4)
self.pushButton_4.setGeometry(QtCore.QRect(0, 80, 130, 32))
self.pushButton_4.setObjectName("pushButton_4")
self.groupBox_5 = QtWidgets.QGroupBox(self.groupBox_3)
self.groupBox_5.setGeometry(QtCore.QRect(-1, 280, 131, 141))
self.groupBox_5.setObjectName("groupBox_5")
self.lineEdit_5 = QtWidgets.QLineEdit(self.groupBox_5)
self.lineEdit_5.setGeometry(QtCore.QRect(3, 50, 131, 21))
self.lineEdit_5.setObjectName("lineEdit_5")
self.lineEdit_6 = QtWidgets.QLineEdit(self.groupBox_5)
self.lineEdit_6.setGeometry(QtCore.QRect(0, 110, 131, 21))
self.lineEdit_6.setObjectName("lineEdit_6")
self.pushButton_5 = QtWidgets.QPushButton(self.groupBox_5)
self.pushButton_5.setGeometry(QtCore.QRect(0, 20, 130, 32))
self.pushButton_5.setObjectName("pushButton_5")
self.pushButton_6 = QtWidgets.QPushButton(self.groupBox_5)
self.pushButton_6.setGeometry(QtCore.QRect(0, 80, 130, 32))
self.pushButton_6.setObjectName("pushButton_6")
self.pushButton = QtWidgets.QPushButton(self.groupBox_3)
self.pushButton.setGeometry(QtCore.QRect(10, 431, 110, 40))
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtWidgets.QPushButton(self.groupBox_3)
self.pushButton_2.setGeometry(QtCore.QRect(10, 70, 110, 32))
self.pushButton_2.setObjectName("pushButton_2")
self.lineEdit.setText("10")
self.lineEdit_2 = QtWidgets.QLineEdit(self.groupBox_3)
self.lineEdit_2.setGeometry(QtCore.QRect(0, 100, 131, 21))
self.lineEdit_2.setObjectName("lineEdit_2")
self.line_2 = QtWidgets.QFrame(self.groupBox_3)
self.line_2.setGeometry(QtCore.QRect(0, 120, 131, 16))
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.line_3 = QtWidgets.QFrame(self.groupBox_3)
self.line_3.setGeometry(QtCore.QRect(0, 273, 131, 10))
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.line_4 = QtWidgets.QFrame(self.groupBox_3)
self.line_4.setGeometry(QtCore.QRect(0, 420, 131, 10))
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.line_5 = QtWidgets.QFrame(self.centralwidget)
self.line_5.setGeometry(QtCore.QRect(130, 0, 20, 681))
self.line_5.setFrameShape(QtWidgets.QFrame.VLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName("line_5")
pyCBIR.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(pyCBIR)
self.statusbar.setObjectName("statusbar")
pyCBIR.setStatusBar(self.statusbar)
self.retranslateUi(pyCBIR)
QtCore.QMetaObject.connectSlotsByName(pyCBIR)
#variables
#defining defaults
self.feature_extraction_method = 'fotf'
self.similarity_metric = 'ed'
self.list_of_parameters = []
self.path_cnn_pre_trained = ''
self.path_cnn_trained = ''
self.path_save_cnn = ''
#features
self.radioButton.clicked.connect(self.radio_clicked)
self.radioButton_2.clicked.connect(self.radio2_clicked)
self.radioButton_4.clicked.connect(self.radio4_clicked)
self.radioButton_5.clicked.connect(self.radio5_clicked)
self.radioButton_6.clicked.connect(self.radio6_clicked)
#metrics
self.radioButton_7.clicked.connect(self.radio7_clicked)
#output
self.pushButton_2.clicked.connect(self.returnPathOutput)
#data
self.pushButton_3.clicked.connect(self.loadDatabasePath)
self.pushButton_4.clicked.connect(self.loadRetrievalPath)
self.pushButton_5.clicked.connect(self.loadDatabaseFile)
self.pushButton_6.clicked.connect(self.loadRetrievalFile)
#run pyCBIR
self.pushButton.clicked.connect(self.returnInformation)
#show results
#----------------------Output------------------------------#
def returnPathOutput(self,pyCBIR):
cwd = os.getcwd()
file = QFileDialog.getExistingDirectory(None,'Select the path output', cwd)
self.lineEdit_2.setText(str(file+'/'))
#----------------------Load data---------------------------#
def loadDatabaseFile(self,pyCBIR):
cwd = self.lineEdit_2.text()
file = QFileDialog.getOpenFileName(None,'Open file', cwd,'CSV Files (*.csv)')
self.lineEdit_5.setText(str(file[0]))
def loadRetrievalFile(self,pyCBIR):
file = QFileDialog.getOpenFileName(None,'Open file', self.lineEdit_5.text(),'CSV Files (*.csv)')
self.lineEdit_6.setText(str(file[0]))
def loadDatabasePath(self,pyCBIR):
cwd = self.lineEdit_2.text()
file = QFileDialog.getExistingDirectory(None,'Open path', cwd)
self.lineEdit_3.setText(str(file+'/'))
def loadRetrievalPath(self,pyCBIR):
file = QFileDialog.getExistingDirectory(None,'Open path', self.lineEdit_6.text())
self.lineEdit_4.setText(str(file+'/'))
#----------------------GLCM Parameters---------------------#
def radio_clicked(self,pyCBIR):
self.mySubwindow=subwindow()
self.mySubwindow.createWindow(200,120)
self.mySubwindow.setWindowFlags(QtCore.Qt.CustomizeWindowHint)
self.label1_glcm = QtWidgets.QLabel(self.mySubwindow)
self.label1_glcm.setGeometry(QtCore.QRect(55, 0, 100, 16))
self.label1_glcm.setObjectName("label1")
self.label1_glcm.setText("GLCM options")
#GLCM parameters
self.label2_glcm = QtWidgets.QLabel(self.mySubwindow)
self.label2_glcm.setGeometry(QtCore.QRect(0, 30, 70, 16))
self.label2_glcm.setObjectName("label2")
self.label2_glcm.setText("Distance:")
self.lineEdit1_glcm = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit1_glcm.setGeometry(QtCore.QRect(75, 30, 30, 16))
self.lineEdit1_glcm.setObjectName("lineEdit")
self.lineEdit1_glcm.setText("1")
self.label3_glcm = QtWidgets.QLabel(self.mySubwindow)
self.label3_glcm.setGeometry(QtCore.QRect(0, 50, 110, 16))
self.label3_glcm.setObjectName("label3")
self.label3_glcm.setText("GrayLevels_old:")
self.lineEdit2_glcm = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit2_glcm.setGeometry(QtCore.QRect(115, 50, 30, 16))
self.lineEdit2_glcm.setObjectName("lineEdit")
self.lineEdit2_glcm.setText("8")
self.label4_glcm = QtWidgets.QLabel(self.mySubwindow)
self.label4_glcm.setGeometry(QtCore.QRect(0, 70, 110, 16))
self.label4_glcm.setObjectName("label4")
self.label4_glcm.setText("GrayLevels_new:")
self.lineEdit3_glcm = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit3_glcm.setGeometry(QtCore.QRect(115, 70, 30, 16))
self.lineEdit3_glcm.setObjectName("lineEdit")
self.lineEdit3_glcm.setText("8")
self.buttom_glcm = QtWidgets.QPushButton(self.mySubwindow)
self.buttom_glcm.setText("Ok")
self.buttom_glcm.setGeometry(QtCore.QRect(50, 100, 100, 16))
self.buttom_glcm.clicked.connect(self.b_glcm)
self.mySubwindow.show()
def b_glcm(self):
self.list_of_parameters = [self.lineEdit1_glcm.text(),self.lineEdit2_glcm.text(),self.lineEdit3_glcm.text()]
self.mySubwindow.close()
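# GLCM parameters are collected as strings from the line edits, in the order
# [distance, graylevels_old, graylevels_new].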
#----------------------HOG Parameters---------------------#
def radio2_clicked(self,pyCBIR):
self.mySubwindow=subwindow()
self.mySubwindow.createWindow(200,90)
self.mySubwindow.setWindowFlags(QtCore.Qt.CustomizeWindowHint)
self.label1_hog = QtWidgets.QLabel(self.mySubwindow)
self.label1_hog.setGeometry(QtCore.QRect(55, 0, 100, 16))
self.label1_hog.setObjectName("label1")
self.label1_hog.setText("HOG options")
self.label2_hog = QtWidgets.QLabel(self.mySubwindow)
self.label2_hog.setGeometry(QtCore.QRect(0, 30, 70, 16))
self.label2_hog.setObjectName("label2")
self.label2_hog.setText("Cells:")
self.lineEdit1_hog = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit1_hog.setGeometry(QtCore.QRect(75, 30, 30, 16))
self.lineEdit1_hog.setObjectName("lineEdit")
self.lineEdit1_hog.setText("3")
self.label3_hog = QtWidgets.QLabel(self.mySubwindow)
self.label3_hog.setGeometry(QtCore.QRect(0, 50, 110, 16))
self.label3_hog.setObjectName("label3")
self.label3_hog.setText("Blocks:")
self.lineEdit2_hog = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit2_hog.setGeometry(QtCore.QRect(75, 50, 30, 16))
self.lineEdit2_hog.setObjectName("lineEdit")
self.lineEdit2_hog.setText("3")
self.buttom_hog = QtWidgets.QPushButton(self.mySubwindow)
self.buttom_hog.setText("Ok")
self.buttom_hog.setGeometry(QtCore.QRect(50, 70, 100, 16))
self.buttom_hog.clicked.connect(self.b_hog)
self.mySubwindow.show()
def b_hog(self):
self.list_of_parameters = [self.lineEdit1_hog.text(),self.lineEdit2_hog.text()]
self.mySubwindow.close()
#----------------------LBP Parameters---------------------#
def radio4_clicked(self,pyCBIR):
self.mySubwindow=subwindow()
self.mySubwindow.createWindow(200,90)
self.mySubwindow.setWindowFlags(QtCore.Qt.CustomizeWindowHint)
self.label1_lbp = QtWidgets.QLabel(self.mySubwindow)
self.label1_lbp.setGeometry(QtCore.QRect(55, 0, 100, 16))
self.label1_lbp.setObjectName("label1")
self.label1_lbp.setText("LBP options")
self.label2_lbp = QtWidgets.QLabel(self.mySubwindow)
self.label2_lbp.setGeometry(QtCore.QRect(0, 30, 70, 16))
self.label2_lbp.setObjectName("label2")
self.label2_lbp.setText("Neighbors:")
self.lineEdit1_lbp = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit1_lbp.setGeometry(QtCore.QRect(75, 30, 30, 16))
self.lineEdit1_lbp.setObjectName("lineEdit")
self.lineEdit1_lbp.setText("16")
self.label3_lbp = QtWidgets.QLabel(self.mySubwindow)
self.label3_lbp.setGeometry(QtCore.QRect(0, 50, 110, 16))
self.label3_lbp.setObjectName("label3")
self.label3_lbp.setText("Radio:")
self.lineEdit2_lbp = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit2_lbp.setGeometry(QtCore.QRect(75, 50, 30, 16))
self.lineEdit2_lbp.setObjectName("lineEdit")
self.lineEdit2_lbp.setText("2")
self.buttom_lbp = QtWidgets.QPushButton(self.mySubwindow)
self.buttom_lbp.setText("Ok")
self.buttom_lbp.setGeometry(QtCore.QRect(50, 70, 100, 16))
self.buttom_lbp.clicked.connect(self.b_lbp)
self.mySubwindow.show()
def b_lbp(self):
self.list_of_parameters = [self.lineEdit1_lbp.text(),self.lineEdit2_lbp.text()]
self.mySubwindow.close()
#----------------------CNN Parameters---------------------#
def radio5_clicked(self,pyCBIR):
self.mySubwindow=subwindow()
self.mySubwindow.createWindow(400,200)
self.mySubwindow.setWindowFlags(QtCore.Qt.CustomizeWindowHint)
self.groupBox_ = QtWidgets.QGroupBox(self.mySubwindow)
self.groupBox_.setGeometry(QtCore.QRect(0, 20, 400, 20))
self.groupBox_.setObjectName("groupBox_")
self.rb1 = QtWidgets.QRadioButton(self.groupBox_)
self.rb1.setGeometry(QtCore.QRect(0, 0, 100, 20))
self.rb1.setObjectName("rb1")
self.rb1.setChecked(True)
self.rb2 = QtWidgets.QRadioButton(self.groupBox_)
self.rb2.setGeometry(QtCore.QRect(120, 0, 150, 20))
self.rb2.setObjectName("rb2")
self.rb3 = QtWidgets.QRadioButton(self.groupBox_)
self.rb3.setGeometry(QtCore.QRect(270, 0, 150, 20))
self.rb3.setObjectName("rb3")
self.rb1.clicked.connect(self.rb1_clicked)
self.rb2.clicked.connect(self.rb2_clicked)
self.rb3.clicked.connect(self.rb3_clicked)
self.rb1.setText("Train CNN")
self.rb2.setText("Fine-Tuning CNN")
self.rb3.setText("Pre-Trained CNN")
self.label_cnn_type = QtWidgets.QLabel(self.mySubwindow)
self.label_cnn_type.setGeometry(QtCore.QRect(0, 55, 150, 16))
self.label_cnn_type.setObjectName("label1")
self.label_cnn_type.setText("CNN Architecture: ")
self.comboBox = QComboBox(self.mySubwindow)
self.comboBox.addItem("lenet")
self.comboBox.addItem("nasnet")
self.comboBox.addItem("inception_resnet")
self.comboBox.addItem("vgg16")
self.comboBox.addItem("inception_v4")
self.comboBox.setGeometry(QtCore.QRect(130, 50, 120, 25))
self.label1 = QtWidgets.QLabel(self.mySubwindow)
self.label1.setGeometry(QtCore.QRect(180, 0, 100, 16))
self.label1.setObjectName("label1")
self.label1.setText("CNN options")
#CNN parameters
self.label2 = QtWidgets.QLabel(self.mySubwindow)
self.label2.setGeometry(QtCore.QRect(0, 100, 50, 16))
self.label2.setObjectName("label2")
self.label2.setText("Epochs:")
self.lineEdit1 = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit1.setGeometry(QtCore.QRect(55, 100, 30, 16))
self.lineEdit1.setObjectName("lineEdit")
self.lineEdit1.setText("1")
self.label3 = QtWidgets.QLabel(self.mySubwindow)
self.label3.setGeometry(QtCore.QRect(120, 100, 100, 16))
self.label3.setObjectName("label3")
self.label3.setText("Learning Rate:")
self.lineEdit2 = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit2.setGeometry(QtCore.QRect(210, 100, 50, 16))
self.lineEdit2.setObjectName("lineEdit")
self.lineEdit2.setText("0.01")
self.label4 = QtWidgets.QLabel(self.mySubwindow)
self.label4.setGeometry(QtCore.QRect(290, 100, 70, 16))
self.label4.setObjectName("label4")
self.label4.setText("Decay:")
self.lineEdit3 = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit3.setGeometry(QtCore.QRect(340, 100, 50, 16))
self.lineEdit3.setObjectName("lineEdit")
self.lineEdit3.setText("0.04")
self.buttom_ok = QtWidgets.QPushButton(self.mySubwindow)
self.buttom_ok.setText("Ok")
self.buttom_ok.setGeometry(QtCore.QRect(180, 150, 70, 50))
self.buttom_ok.clicked.connect(self.b_cnn)
self.mySubwindow.show()
def rb1_clicked(self,pyCBIR):
self.lineEdit1.show()
self.lineEdit2.show()
self.lineEdit3.show()
def rb2_clicked(self,pyCBIR):
self.lineEdit1.show()
self.lineEdit2.show()
self.lineEdit3.show()
def rb3_clicked(self,pyCBIR):
self.lineEdit1.hide()
self.lineEdit2.hide()
self.lineEdit3.hide()
def b_cnn(self):
if self.rb1.isChecked():  # train the CNN
self.feature_extraction_method = self.comboBox.currentText()
cwd = os.getcwd()
lr = self.lineEdit2.text()
lr = lr.replace('.', '')
file_name = self.comboBox.currentText()+'_epochs_'+self.lineEdit1.text()+'_learningRate_'+lr+'.h5'
#file_name = 'model.ckpt'
try:
epocas = int(self.lineEdit1.text())
if epocas == 0:
QMessageBox.information(None,'pyCBIR', 'Invalid number of epochs!')
#self.buttom_ok.clicked.connect(self.radio5_clicked)
else:
if self.feature_extraction_method == 'lenet':
QMessageBox.information(None,'pyCBIR', 'Now you have to choose the place to save the trained model.')
self.path_save_cnn = QFileDialog.getSaveFileName(None,'Save File',file_name)[0]
self.list_of_parameters = [self.lineEdit2.text(),self.lineEdit1.text()]#learning rate and epochs
self.mySubwindow.close()
except ValueError:
QMessageBox.information(None,'pyCBIR', 'Invalid number of epochs!')
#if self.lineEdit1.text() is '0':
# self.path_cnn_trained = QFileDialog.getOpenFileName(None,'Select the file of the pre-trained CNN: ', cwd,"Model Files (*.h5)")
#else:
# self.path_cnn_trained = QFileDialog.getSaveFileName(None,'Save File',file_name,filter = 'h5 (*.h5)')[0]
elif self.rb2.isChecked():#fine tuning
self.feature_extraction_method = 'fine_tuning_'+self.comboBox.currentText()
buttonReply = QMessageBox.question(None,'pyCBIR', 'Do you want to load a .h5 file?',QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if buttonReply == QMessageBox.Yes:
cwd = os.getcwd()
self.path_cnn_pre_trained = QFileDialog.getOpenFileName(None,'Select a .h5 file!',cwd,filter = 'h5 (*.h5)')[0]
else:
self.path_cnn_pre_trained = ''
QMessageBox.information(None,'pyCBIR', 'Now you have to choose the place to save the fine tuning model.')
self.path_save_cnn = QFileDialog.getExistingDirectory(None,'Open path')
self.path_save_cnn = self.path_save_cnn + '/model_fine_tuning.h5'
#if self.feature_extraction_method == 'fine_tuning_lenet':
# QMessageBox.information(None,'pyCBIR', 'Now you have to choose the pre-trained file.')
# cwd = os.getcwd()
# self.path_cnn_pre_trained = QFileDialog.getOpenFileName(None,'Select the file of the pre-trained CNN: ', cwd,"Model Files (*.ckpt)")
self.list_of_parameters = [self.lineEdit2.text(),self.lineEdit1.text()]#learning rate and epochs
self.mySubwindow.close()
else:  # pre-trained CNN
self.feature_extraction_method = 'pretrained_'+self.comboBox.currentText()
if self.feature_extraction_method == 'pretrained_lenet':
QMessageBox.information(None,'pyCBIR', 'Now you have to choose the pre-trained file.')
cwd = os.getcwd()
self.path_cnn_pre_trained = QFileDialog.getOpenFileName(None,'Select the file of the pre-trained CNN: ', cwd,"Model Files (*.h5)")[0]
self.feature_extraction_method = 'pretrained_'+self.comboBox.currentText()
else:
buttonReply = QMessageBox.question(None,'pyCBIR', 'Do you want to use imageNet weights?',QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if buttonReply == QMessageBox.No:
cwd = os.getcwd()
QMessageBox.information(None,'pyCBIR', 'Now you have to choose the pre-trained file.')
self.path_cnn_pre_trained = QFileDialog.getOpenFileName(None,'Select a .h5 file!',cwd,filter = 'h5 (*.h5)')[0]
self.list_of_parameters = [self.lineEdit2.text(),self.lineEdit1.text()]#learning rate and epochs
self.mySubwindow.close()
print(self.feature_extraction_method)
#----------------------Daisy Parameters---------------------#
def radio6_clicked(self,pyCBIR):
self.mySubwindow=subwindow()
self.mySubwindow.createWindow(200,140)
self.mySubwindow.setWindowFlags(QtCore.Qt.CustomizeWindowHint)
self.label1_daisy = QtWidgets.QLabel(self.mySubwindow)
self.label1_daisy.setGeometry(QtCore.QRect(55, 0, 100, 16))
self.label1_daisy.setObjectName("label1")
self.label1_daisy.setText("Daisy options")
#GLCM parameters
self.label2_daisy = QtWidgets.QLabel(self.mySubwindow)
self.label2_daisy.setGeometry(QtCore.QRect(0, 30, 70, 16))
self.label2_daisy.setObjectName("label2")
self.label2_daisy.setText("Step:")
self.lineEdit1_daisy = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit1_daisy.setGeometry(QtCore.QRect(45, 30, 30, 16))
self.lineEdit1_daisy.setObjectName("lineEdit")
self.lineEdit1_daisy.setText("4")
self.label3_daisy = QtWidgets.QLabel(self.mySubwindow)
self.label3_daisy.setGeometry(QtCore.QRect(0, 50, 110, 16))
self.label3_daisy.setObjectName("label3")
self.label3_daisy.setText("Rings:")
self.lineEdit2_daisy = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit2_daisy.setGeometry(QtCore.QRect(45, 50, 30, 16))
self.lineEdit2_daisy.setObjectName("lineEdit")
self.lineEdit2_daisy.setText("3")
self.label4_daisy = QtWidgets.QLabel(self.mySubwindow)
self.label4_daisy.setGeometry(QtCore.QRect(0, 70, 110, 16))
self.label4_daisy.setObjectName("label4")
self.label4_daisy.setText("Histogram:")
self.lineEdit3_daisy = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit3_daisy.setGeometry(QtCore.QRect(85, 70, 30, 16))
self.lineEdit3_daisy.setObjectName("lineEdit")
self.lineEdit3_daisy.setText("2")
self.label5_daisy = QtWidgets.QLabel(self.mySubwindow)
self.label5_daisy.setGeometry(QtCore.QRect(0, 90, 110, 16))
self.label5_daisy.setObjectName("label4")
self.label5_daisy.setText("Orientations:")
self.lineEdit4_daisy = QtWidgets.QLineEdit(self.mySubwindow)
self.lineEdit4_daisy.setGeometry(QtCore.QRect(85, 90, 30, 16))
self.lineEdit4_daisy.setObjectName("lineEdit")
self.lineEdit4_daisy.setText("8")
self.buttom_daisy = QtWidgets.QPushButton(self.mySubwindow)
self.buttom_daisy.setText("Ok")
self.buttom_daisy.setGeometry(QtCore.QRect(50, 115, 100, 16))
self.buttom_daisy.clicked.connect(self.b_daisy)
self.mySubwindow.show()
def b_daisy(self):
self.list_of_parameters = [self.lineEdit1_daisy.text(),self.lineEdit2_daisy.text(),self.lineEdit3_daisy.text(),self.lineEdit4_daisy.text()]
self.mySubwindow.close()
#-----------------Brute Force-----------------#
def radio7_clicked(self,pyCBIR):
self.mySubwindow=subwindow()
self.mySubwindow.createWindow(200,230)
self.mySubwindow.setWindowFlags(QtCore.Qt.CustomizeWindowHint)
self.label1_bf = QtWidgets.QLabel(self.mySubwindow)
self.label1_bf.setGeometry(QtCore.QRect(55, 0, 120, 16))
self.label1_bf.setObjectName("label1")
self.label1_bf.setText("Similarity Metrics")
self.gp = QtWidgets.QGroupBox(self.mySubwindow)
self.radioButton_bf_1 = QtWidgets.QRadioButton(self.gp)
self.radioButton_bf_1.setGeometry(QtCore.QRect(13, 29, 132, 18))
self.radioButton_bf_1.setChecked(True)
self.radioButton_bf_2 = QtWidgets.QRadioButton(self.gp)
self.radioButton_bf_2.setGeometry(QtCore.QRect(13, 46, 132, 18))
self.radioButton_bf_3 = QtWidgets.QRadioButton(self.gp)
self.radioButton_bf_3.setGeometry(QtCore.QRect(13, 63, 129, 18))
self.radioButton_bf_4 = QtWidgets.QRadioButton(self.gp)
self.radioButton_bf_4.setGeometry(QtCore.QRect(13, 80, 141, 18))
self.radioButton_bf_5 = QtWidgets.QRadioButton(self.gp)
self.radioButton_bf_5.setGeometry(QtCore.QRect(13, 97, 164, 18))
self.radioButton_bf_6 = | |
self.face_halfedges(fkey):
self.halfedge[u][v] = None
del self.face[fkey]
if fkey in self.facedata:
del self.facedata[fkey]
for nbr in nbrs:
del self.halfedge[nbr][key]
edge = "-".join(map(str, sorted([nbr, key])))
if edge in self.edgedata:
del self.edgedata[edge]
for nbr in nbrs:
for n in self.vertex_neighbors(nbr):
if self.halfedge[nbr][n] is None and self.halfedge[n][nbr] is None:
del self.halfedge[nbr][n]
del self.halfedge[n][nbr]
edge = "-".join(map(str, sorted([nbr, n])))
if edge in self.edgedata:
del self.edgedata[edge]
del self.halfedge[key]
del self.vertex[key]
def delete_face(self, fkey):
"""Delete a face from the mesh object.
Parameters
----------
fkey : int
The identifier of the face.
Notes
-----
In some cases, disconnected vertices can remain after application of this
method. To remove these vertices as well, combine this method with vertex
culling (:meth:`cull_vertices`).
Examples
--------
>>>
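>>> # A hedged sketch; assumes this datastructure is instantiated as ``Mesh()`` and that
>>> # ``add_vertex``/``add_face`` exist in the full class (not shown in this excerpt).
>>> mesh = Mesh()
>>> a = mesh.add_vertex(x=0, y=0, z=0)
>>> b = mesh.add_vertex(x=1, y=0, z=0)
>>> c = mesh.add_vertex(x=1, y=1, z=0)
>>> fkey = mesh.add_face([a, b, c])
>>> mesh.delete_face(fkey)
>>> len(list(mesh.faces()))
0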
"""
for u, v in self.face_halfedges(fkey):
self.halfedge[u][v] = None
if self.halfedge[v][u] is None:
del self.halfedge[u][v]
del self.halfedge[v][u]
edge = "-".join(map(str, sorted([u, v])))
if edge in self.edgedata:
del self.edgedata[edge]
del self.face[fkey]
if fkey in self.facedata:
del self.facedata[fkey]
def remove_unused_vertices(self):
"""Remove all unused vertices from the mesh object.
"""
for u in list(self.vertices()):
if u not in self.halfedge:
del self.vertex[u]
else:
if not self.halfedge[u]:
del self.vertex[u]
del self.halfedge[u]
cull_vertices = remove_unused_vertices
# --------------------------------------------------------------------------
# accessors
# --------------------------------------------------------------------------
def vertices(self, data=False):
"""Iterate over the vertices of the mesh.
Parameters
----------
data : bool, optional
Return the vertex data as well as the vertex keys.
Yields
------
int or tuple
The next vertex identifier, if ``data`` is false.
The next vertex as a (key, attr) tuple, if ``data`` is true.
"""
for key in self.vertex:
if not data:
yield key
else:
yield key, self.vertex_attributes(key)
def faces(self, data=False):
"""Iterate over the faces of the mesh.
Parameters
----------
data : bool, optional
Return the face data as well as the face keys.
Yields
------
int or tuple
The next face identifier, if ``data`` is ``False``.
The next face as a (fkey, attr) tuple, if ``data`` is ``True``.
"""
for key in self.face:
if not data:
yield key
else:
yield key, self.face_attributes(key)
def edges(self, data=False):
"""Iterate over the edges of the mesh.
Parameters
----------
data : bool, optional
Return the edge data as well as the edge vertex keys.
Yields
------
tuple
The next edge as a (u, v) tuple, if ``data`` is false.
The next edge as a ((u, v), data) tuple, if ``data`` is true.
Notes
-----
Mesh edges have no topological meaning. They are only used to store data.
Edges are not automatically created when vertices and faces are added to
the mesh. Instead, they are created when data is stored on them, or when
they are accessed using this method.
This method yields the directed edges of the mesh.
Unless edges were added explicitly using :meth:`add_edge` the order of
edges is *as they come out*. However, as long as the topology remains
unchanged, the order is consistent.
Examples
--------
>>>
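>>> # A hedged sketch; edge attributes are created lazily the first time an edge is accessed.
>>> for (u, v), attr in mesh.edges(data=True):
...     print(u, v, attr)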
"""
seen = set()
for u in self.halfedge:
for v in self.halfedge[u]:
key = u, v
ikey = v, u
if key in seen or ikey in seen:
continue
seen.add(key)
seen.add(ikey)
if not data:
yield key
else:
yield key, self.edge_attributes(key)
def vertices_where(self, conditions, data=False):
"""Get vertices for which a certain condition or set of conditions is true.
Parameters
----------
conditions : dict
A set of conditions in the form of key-value pairs.
The keys should be attribute names. The values can be attribute
values or ranges of attribute values in the form of min/max pairs.
data : bool, optional
Yield the vertices and their data attributes.
Default is ``False``.
Yields
------
key: hashable
The next vertex that matches the condition.
2-tuple
The next vertex and its attributes, if ``data=True``.
"""
for key, attr in self.vertices(True):
is_match = True
for name, value in conditions.items():
method = getattr(self, name, None)
if callable(method):
val = method(key)
if isinstance(val, list):
if value not in val:
is_match = False
break
break
if isinstance(value, (tuple, list)):
minval, maxval = value
if val < minval or val > maxval:
is_match = False
break
else:
if value != val:
is_match = False
break
else:
if name not in attr:
is_match = False
break
if isinstance(attr[name], list):
if value not in attr[name]:
is_match = False
break
break
if isinstance(value, (tuple, list)):
minval, maxval = value
if attr[name] < minval or attr[name] > maxval:
is_match = False
break
else:
if value != attr[name]:
is_match = False
break
if is_match:
if data:
yield key, attr
else:
yield key
def vertices_where_predicate(self, predicate, data=False):
"""Get vertices for which a certain condition or set of conditions is true using a lambda function.
Parameters
----------
predicate : callable
The condition you want to evaluate. The callable takes 2 parameters: ``key``, ``attr`` and should return ``True`` or ``False``.
data : bool, optional
Yield the vertices and their data attributes.
Default is ``False``.
Yields
------
key: hashable
The next vertex that matches the condition.
2-tuple
The next vertex and its attributes, if ``data=True``.
Examples
--------
>>>
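>>> # A hedged sketch; assumes the vertices carry the usual 'x'/'y'/'z' attributes.
>>> keys = list(mesh.vertices_where_predicate(lambda key, attr: attr['z'] > 0.5))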
"""
for key, attr in self.vertices(True):
if predicate(key, attr):
if data:
yield key, attr
else:
yield key
def edges_where(self, conditions, data=False):
"""Get edges for which a certain condition or set of conditions is true.
Parameters
----------
conditions : dict
A set of conditions in the form of key-value pairs.
The keys should be attribute names. The values can be attribute
values or ranges of attribute values in the form of min/max pairs.
data : bool, optional
Yield the edges and their data attributes.
Default is ``False``.
Yields
------
2-tuple
The next edge as a (u, v) tuple, if ``data=False``.
3-tuple
The next edge as a (u, v, data) tuple, if ``data=True``.
"""
for key in self.edges():
is_match = True
attr = self.edge_attributes(key)
for name, value in conditions.items():
method = getattr(self, name, None)
if method and callable(method):
val = method(key)
elif name in attr:
val = attr[name]
else:
is_match = False
break
if isinstance(val, list):
if value not in val:
is_match = False
break
elif isinstance(value, (tuple, list)):
minval, maxval = value
if val < minval or val > maxval:
is_match = False
break
else:
if value != val:
is_match = False
break
if is_match:
if data:
yield key, attr
else:
yield key
def edges_where_predicate(self, predicate, data=False):
"""Get edges for which a certain condition or set of conditions is true using a lambda function.
Parameters
----------
predicate : callable
The condition you want to evaluate. The callable takes 2 parameters: ``key`` (a ``(u, v)`` tuple) and ``attr``, and should return ``True`` or ``False``.
data : bool, optional
Yield the edges and their data attributes.
Default is ``False``.
Yields
------
2-tuple
The next edge as a (u, v) tuple, if ``data=False``.
3-tuple
The next edge as a (u, v, data) tuple, if ``data=True``.
Examples
--------
>>>
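>>> # A hedged sketch; the predicate receives ``key`` as a ``(u, v)`` tuple plus the
>>> # edge attributes (assumed to behave like a dict here).
>>> edges = list(mesh.edges_where_predicate(lambda key, attr: attr.get('q', 0.0) > 1.0))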
"""
for key, attr in self.edges(True):
if predicate(key, attr):
if data:
yield key, attr
else:
yield key
def faces_where(self, conditions, data=False):
"""Get faces for which a certain condition or set of conditions is true.
Parameters
----------
conditions : dict
A set of conditions in the form of key-value pairs.
The keys should be attribute names. The values can be attribute
values or ranges of attribute values in the form of min/max pairs.
data : bool, optional
Yield the faces and their data attributes.
Default is ``False``.
Yields
------
key: hashable
The next face that matches the condition.
2-tuple
The next face and its attributes, if ``data=True``.
"""
for fkey in self.faces():
is_match = True
attr = self.face_attributes(fkey)
for name, value in conditions.items():
method = getattr(self, name, None)
if method and callable(method):
val = method(fkey)
elif name in attr:
val = attr[name]
else:
is_match = False
break
if isinstance(val, list):
if value not in val:
is_match = False
break
elif isinstance(value, (tuple, list)):
minval, maxval = value
if val < minval or val > maxval:
is_match = False
break
else:
if value != val:
is_match = False
break
if is_match:
if | |
import numpy as np
import scipy.stats
from ..base_parameters import (
ParamHelper, PriorHelper, PrecondHelper,
get_value_func, get_hyperparam_func, get_dim_func,
set_value_func, set_hyperparam_func,
)
from .._utils import (
normal_logpdf,
matrix_normal_logpdf,
pos_def_mat_inv,
varp_stability_projection,
tril_vector_to_mat,
)
import logging
logger = logging.getLogger(name=__name__)
## Implementations of Vector, Square, Rectangular Parameters
# Single Vector
class VectorParamHelper(ParamHelper):
def __init__(self, name='mu', dim_names=None):
self.name = name
self.dim_names = ['n'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
if np.ndim(kwargs[self.name]) != 1:
    raise ValueError("{} must be vector".format(self.name))
n = np.shape(kwargs[self.name])[0]
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: n})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
mu = np.reshape(vector[vector_index:vector_index+n], (n))
var_dict[self.name] = mu
return vector_index+n
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is a {1} vector".format(
self.name, self.dim_names[0]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
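# A minimal round-trip sketch for the helper above (hypothetical values; a full
# Parameters object normally drives these calls):
#   helper = VectorParamHelper(name='mu')
#   parts = []
#   helper.from_dict_to_vector(parts, {'mu': np.array([1., 2., 3.])})
#   flat = np.concatenate(parts)                  # array([1., 2., 3.])
#   out = {}
#   helper.from_vector_to_dict(out, flat, 0, n=3) # returns 3; out['mu'] == flat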
class VectorPriorHelper(PriorHelper):
def __init__(self, name='mu', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name)
self._var_col_name = 'var_col_{0}'.format(name)
self._var_row_name = var_row_name
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['n'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
n = np.shape(kwargs[self._mean_name])[0]
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
if not np.isscalar(kwargs[self._var_col_name]):
raise ValueError("{} must be scalar".format(self._var_col_name))
else:
raise ValueError("{} must be provided".format(self._var_col_name))
prior._set_check_dim(**{self.dim_names[0]: n})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
var_dict[self.name] = np.random.multivariate_normal(
mean=mean_mu,
cov=var_col_mu*pos_def_mat_inv(Qinv),
)
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinv = tril_vector_to_mat(var_dict[self._lt_vec_name])
Qinv = LQinv.dot(LQinv.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
S_prevprev = var_col_mu**-1 + \
sufficient_stat[self.name]['S_prevprev']
S_curprev = mean_mu * var_col_mu**-1 + \
sufficient_stat[self.name]['S_curprev']
post_mean_mu = S_curprev/S_prevprev
var_dict[self.name] = np.random.multivariate_normal(
mean=post_mean_mu,
cov=pos_def_mat_inv(Qinv)/S_prevprev,
)
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
LQinv = tril_vector_to_mat(parameters.var_dict[self._lt_vec_name])
else:
LQinv = np.eye(prior.dim[self.dim_names[0]])
logprior += normal_logpdf(parameters.var_dict[self.name],
mean=mean_mu,
Lprec=var_col_mu**-0.5 * LQinv,
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
mu = getattr(parameters, self.name)
if self._var_row_name is not None:
Qinv = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinv = np.eye(prior.dim[self.dim_names[0]])
grad[self.name] = -1.0 * np.dot(var_col_mu**-1 * Qinv, mu - mean_mu)
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
mu = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_mu = mu.copy()
else:
mean_mu = np.zeros_like(mu)
var_col_mu = var
prior_kwargs[self._mean_name] = mean_mu
prior_kwargs[self._var_col_name] = var_col_mu
return
def get_default_kwargs(self, default_kwargs, **kwargs):
n = kwargs[self.dim_names[0]]
var = kwargs['var']
mean_mu = np.zeros((n))
var_col_mu = var
default_kwargs[self._mean_name] = mean_mu
default_kwargs[self._var_col_name] = var_col_mu
return
class VectorPrecondHelper(PrecondHelper):
def __init__(self, name='mu', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['n'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.dot(Q, grad[self.name])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.linalg.solve(LQinv.T,
np.random.normal(loc=0, size=(LQinv.shape[0]))
)
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
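# Note on the preconditioner above: with Qinv = LQinv.dot(LQinv.T), solving
# LQinv.T x = z for z ~ N(0, I) yields noise with covariance
# (LQinv LQinv^T)^{-1} = Q, consistent with preconditioning the gradient by Q.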
# Multiple Vectors
class VectorsParamHelper(ParamHelper):
def __init__(self, name='mu', dim_names=None):
self.name = name
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
num_states, n = np.shape(kwargs[self.name])
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: n,
self.dim_names[1]: num_states})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('fixed') is not None:
param.var_dict[self.name] = name_kwargs['fixed'].copy()
return
def from_dict_to_vector(self, vector_list, var_dict, **kwargs):
vector_list.append(var_dict[self.name].flatten())
return
def from_vector_to_dict(self, var_dict, vector, vector_index, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
mu = np.reshape(vector[vector_index:vector_index+num_states*n],
(num_states, n))
var_dict[self.name] = mu
return vector_index+num_states*n
def get_properties(self):
properties = {}
properties[self.name] = property(
fget=get_value_func(self.name),
fset=set_value_func(self.name),
doc="{0} is {2} {1} vectors".format(
self.name, self.dim_names[0], self.dim_names[1]),
)
for dim_name in self.dim_names:
properties[dim_name] = property(
fget=get_dim_func(dim_name),
)
return properties
class VectorsPriorHelper(PriorHelper):
def __init__(self, name='mu', dim_names=None, var_row_name=None):
self.name = name
self._mean_name = 'mean_{0}'.format(name) # num_states by n
self._var_col_name = 'var_col_{0}'.format(name) # num_states by n
self._var_row_name = var_row_name
self._lt_vec_name = 'L{0}inv_vec'.format(var_row_name)
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def set_hyperparams(self, prior, **kwargs):
if self._mean_name in kwargs:
num_states, n = np.shape(kwargs[self._mean_name])
else:
raise ValueError("{} must be provided".format(self._mean_name))
if self._var_col_name in kwargs:
num_states2 = np.size(kwargs[self._var_col_name])
else:
raise ValueError("{} must be provided".format(self._var_col_name))
if (num_states != num_states2):
raise ValueError("prior dimensions don't match")
prior._set_check_dim(**{self.dim_names[0]: n,
self.dim_names[1]: num_states})
prior.hyperparams[self._mean_name] = kwargs[self._mean_name]
prior.hyperparams[self._var_col_name] = kwargs[self._var_col_name]
return
def sample_prior(self, prior, var_dict, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + \
1e-9*np.eye(prior.dim[self.dim_names[0]])
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(prior.dim[self.dim_names[0]])
for _ in range(prior.dim[self.dim_names[1]])])
mus = [None for k in range(prior.dim[self.dim_names[1]])]
for k in range(len(mus)):
mus[k] = np.random.multivariate_normal(
mean=mean_mu[k],
cov=var_col_mu[k]*pos_def_mat_inv(Qinvs[k]),
)
var_dict[self.name] = np.array(mus)
return
def sample_posterior(self, prior, var_dict, sufficient_stat, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
num_states, n = np.shape(mean_mu)
if self._var_row_name is not None:
if self._lt_vec_name in var_dict:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in var_dict[self._lt_vec_name]])
Qinvs = np.array([LQinv_k.dot(LQinv_k.T) + 1e-9*np.eye(n)
for LQinv_k in LQinvs])
else:
raise ValueError("Missing {}\n".format(self._lt_vec_name) +
"Perhaps {} must be earlier in _prior_helper_list".format(
self._var_row_name)
)
else:
Qinvs = np.array([np.eye(n) for _ in range(num_states)])
mus = [None for k in range(num_states)]
for k in range(len(mus)):
S_prevprev = var_col_mu[k]**-1 + \
sufficient_stat[self.name]['S_prevprev'][k]
S_curprev = mean_mu[k] * var_col_mu[k]**-1 + \
sufficient_stat[self.name]['S_curprev'][k]
post_mean_mu_k = S_curprev/S_prevprev
mus[k] = np.random.multivariate_normal(
mean=post_mean_mu_k,
cov=pos_def_mat_inv(Qinvs[k])/S_prevprev,
)
var_dict[self.name] = np.array(mus)
return
def logprior(self, prior, logprior, parameters, **kwargs):
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
num_states, n = np.shape(mean_mu)
if self._var_row_name is not None:
LQinvs = np.array([tril_vector_to_mat(LQinv_vec_k)
for LQinv_vec_k in parameters.var_dict[self._lt_vec_name]])
else:
LQinvs = np.array([np.eye(n)
for _ in range(num_states)])
for mu_k, mean_mu_k, var_col_mu_k, LQinv_k in zip(
parameters.var_dict[self.name], mean_mu, var_col_mu, LQinvs):
logprior += normal_logpdf(mu_k,
mean=mean_mu_k,
Lprec=var_col_mu_k**-0.5 * LQinv_k,
)
return logprior
def grad_logprior(self, prior, grad, parameters, **kwargs):
mu = parameters.var_dict[self.name]
mean_mu = prior.hyperparams[self._mean_name]
var_col_mu = prior.hyperparams[self._var_col_name]
num_states, n = np.shape(mean_mu)
if self._var_row_name is not None:
Qinvs = getattr(parameters, '{}inv'.format(self._var_row_name))
else:
Qinvs = np.array([np.eye(n)
for _ in range(num_states)])
grad[self.name] = np.array([
-1.0 * np.dot(var_col_mu[k]**-1 * Qinvs[k], mu[k] - mean_mu[k])
for k in range(num_states)])
return
def get_prior_kwargs(self, prior_kwargs, parameters, **kwargs):
var = kwargs['var']
mu = getattr(parameters, self.name)
if kwargs.get('from_mean', False):
mean_mu = mu.copy()
else:
mean_mu = np.zeros_like(mu)
var_col_mu = np.array([
var for _ in range(mu.shape[0])
])
prior_kwargs[self._mean_name] = mean_mu
prior_kwargs[self._var_col_name] = var_col_mu
return
def get_default_kwargs(self, default_kwargs, **kwargs):
n = kwargs[self.dim_names[0]]
num_states = kwargs[self.dim_names[1]]
var = kwargs['var']
mean_mu = np.zeros((num_states, n))
var_col_mu = np.ones((num_states))*var
default_kwargs[self._mean_name] = mean_mu
default_kwargs[self._var_col_name] = var_col_mu
return
class VectorsPrecondHelper(PrecondHelper):
def __init__(self, name='mu', dim_names=None, var_row_name='Q'):
self.name = name
self._var_row_name = var_row_name
self.dim_names = ['n', 'num_states'] if dim_names is None else dim_names
return
def precondition(self, preconditioner,
precond_grad, grad, parameters, **kwargs):
Q = getattr(parameters, self._var_row_name)
precond_grad[self.name] = np.array([
np.dot(Q[k], grad[self.name][k])
for k in range(Q.shape[0])
])
return
def precondition_noise(self, preconditioner,
noise, parameters, **kwargs):
LQinv = getattr(parameters, "L{}inv".format(self._var_row_name))
noise[self.name] = np.array([
np.linalg.solve(LQinv[k].T,
np.random.normal(loc=0, size=LQinv.shape[-1])
)
for k in range(LQinv.shape[0])
])
return
def correction_term(self, preconditioner, correction, parameters, **kwargs):
correction[self.name] = np.zeros_like(getattr(parameters, self.name),
dtype=float)
return
# Single Square
class SquareMatrixParamHelper(ParamHelper):
def __init__(self, name='A', dim_names=None):
self.name = name
self.dim_names = ['n'] if dim_names is None else dim_names
return
def set_var(self, param, **kwargs):
if self.name in kwargs:
n, n2 = np.shape(kwargs[self.name])
if n != n2:
raise ValueError("{} must be square matrices".format(self.name))
param.var_dict[self.name] = np.array(kwargs[self.name]).astype(float)
param._set_check_dim(**{self.dim_names[0]: n})
else:
raise ValueError("{} not provided".format(self.name))
return
def project_parameters(self, param, **kwargs):
name_kwargs = kwargs.get(self.name, {})
if name_kwargs.get('thresh', True):
A = param.var_dict[self.name]
A = varp_stability_projection(A,
eigenvalue_cutoff=name_kwargs.get(
'eigenvalue_cutoff', 0.9999),
var_name=self.name,
logger=logger)
param.var_dict[self.name] = A
| |
size of the paddings added to the input on both sides of its
spatial dimensions. Only zero-padding is supported. Default: 0
dilation: dilation of the 2D convolution operation. Default: 1
groups: number of groups into which the input and output channels are divided,
so as to perform a ``grouped convolution``. When ``groups`` is not 1,
``in_channels`` and ``out_channels`` must be divisible by groups,
and the shape of weight should be ``(groups, in_channels // groups,
out_channels // groups, height, width)``. Default: 1
conv_mode: supports "cross_correlation". Default: "cross_correlation"
compute_mode: when set to "default", no special requirements will be
placed on the precision of intermediate results. When set to "float32",
"float32" would be used for accumulator and intermediate result, but only
effective when input and output are of float16 dtype.
Returns:
output tensor.
"""
assert (
conv_mode.lower() == "cross_correlation"
or conv_mode.name == "CROSS_CORRELATION"
)
if amp._enabled:
compute_mode = "float32"
inp, weight, bias = cast_tensors(inp, weight, bias)
else:
dtype = dtype_promotion(inp, weight)
if inp.dtype != dtype:
inp = inp.astype(dtype)
if weight.dtype != dtype:
weight = weight.astype(dtype)
if groups != 1:
raise NotImplementedError("group transposed conv2d is not supported yet.")
stride_h, stride_w = expand_hw(stride)
pad_h, pad_w = expand_hw(padding)
dilate_h, dilate_w = expand_hw(dilation)
op = builtin.ConvolutionBackwardData(
stride_h=stride_h,
stride_w=stride_w,
pad_h=pad_h,
pad_w=pad_w,
dilate_h=dilate_h,
dilate_w=dilate_w,
strategy=get_execution_strategy(),
compute_mode=compute_mode,
)
(output,) = apply(op, weight, inp)
if bias is not None:
output += bias
return output
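# For reference, the output spatial size of a 2D transposed convolution is
# generally (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1;
# e.g. in=8, kernel=3, stride=2, padding=1, dilation=1 gives 15.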
def deformable_conv2d(
inp: Tensor,
weight: Tensor,
offset: Tensor,
mask: Tensor,
bias: Optional[Tensor] = None,
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode="cross_correlation",
compute_mode="default",
) -> Tensor:
r"""Deformable Convolution.
Args:
inp: input feature map.
weight: convolution kernel.
offset: input offset to kernel, channel of this tensor should match the deformable settings.
mask: input mask to kernel, channel of this tensor should match the deformable settings.
bias: bias added to the result of convolution (if given).
stride: stride of the 2D convolution operation. Default: 1
padding: size of the paddings added to the input on both sides of its
spatial dimensions. Only zero-padding is supported. Default: 0
dilation: dilation of the 2D convolution operation. Default: 1
groups: number of groups into which the input and output channels are divided,
so as to perform a ``grouped convolution``. When ``groups`` is not 1,
``in_channels`` and ``out_channels`` must be divisible by groups,
and the shape of weight should be ``(groups, out_channel // groups,
in_channels // groups, height, width)``. Default: 1
conv_mode: supports "cross_correlation". Default: "cross_correlation"
compute_mode: when set to "default", no special requirements will be
placed on the precision of intermediate results. When set to "float32",
"float32" would be used for accumulator and intermediate result, but only
effective when input and output are of float16 dtype.
Returns:
output tensor.
"""
assert (
conv_mode.lower() == "cross_correlation"
or conv_mode.name == "CROSS_CORRELATION"
)
if amp._enabled:
compute_mode = "float32"
inp, weight, offset, mask, bias = cast_tensors(inp, weight, offset, mask, bias)
else:
offset = offset.astype("float32")
mask = mask.astype("float32")
stride_h, stride_w = expand_hw(stride)
pad_h, pad_w = expand_hw(padding)
dilate_h, dilate_w = expand_hw(dilation)
sparse_type = "dense" if groups == 1 else "group"
op = builtin.DeformableConv(
stride_h=stride_h,
stride_w=stride_w,
pad_h=pad_h,
pad_w=pad_w,
dilate_h=dilate_h,
dilate_w=dilate_w,
strategy=get_execution_strategy(),
mode=conv_mode,
compute_mode=compute_mode,
sparse=sparse_type,
)
(output,) = apply(op, inp, weight, offset, mask)
if bias is not None:
output += bias
return output
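# A hedged shape note for the offset/mask inputs above (not from the original
# docstring): under the usual deformable-conv-v2 convention, for a (kh, kw) kernel
# and ``deformable_groups`` groups, ``offset`` carries 2 * deformable_groups * kh * kw
# channels and ``mask`` carries deformable_groups * kh * kw channels, both with the
# spatial size of the convolution output. Treat these counts as an assumption to
# verify against the builtin op rather than a guarantee.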
def local_conv2d(
inp: Tensor,
weight: Tensor,
bias: Optional[Tensor] = None,
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
conv_mode="cross_correlation",
):
r"""Applies a spatial 2D convolution over a grouped, channeled image with untied (per-position) kernels."""
assert (
conv_mode.lower() == "cross_correlation"
or conv_mode.name == "CROSS_CORRELATION"
)
stride_h, stride_w = expand_hw(stride)
pad_h, pad_w = expand_hw(padding)
dilate_h, dilate_w = expand_hw(dilation)
dtype = dtype_promotion(inp, weight)
if inp.dtype != dtype:
inp = inp.astype(dtype)
if weight.dtype != dtype:
weight = weight.astype(dtype)
op = builtin.GroupLocal(
stride_h=stride_h,
stride_w=stride_w,
pad_h=pad_h,
pad_w=pad_w,
dilate_h=dilate_h,
dilate_w=dilate_w,
mode=conv_mode,
sparse="dense",
)
(output,) = apply(op, inp, weight)
if bias is not None:
output += bias
return output
def conv_transpose3d(
inp: Tensor,
weight: Tensor,
bias: Optional[Tensor] = None,
stride: Union[int, Tuple[int, int, int]] = 1,
padding: Union[int, Tuple[int, int, int]] = 0,
dilation: Union[int, Tuple[int, int, int]] = 1,
) -> Tensor:
r"""3D transposed convolution operation. Only supports the case where groups = 1
and conv_mode = "cross_correlation".
Refer to :class:`~.ConvTranspose3d` for more information.
Args:
inp: feature map of the convolution operation.
weight: convolution kernel.
weight usually has shape ``(in_channels, out_channels, depth, height, width)``.
bias: bias added to the result of convolution (if given).
stride: stride of the 3D convolution operation. Default: 1
padding: size of the paddings added to the input on all sides of its
spatial dimensions. Only zero-padding is supported. Default: 0
dilation: dilation of the 3D convolution operation. Default: 1
Returns:
output tensor.
"""
D, H, W = 0, 1, 2
pad = _triple(padding)
stride = _triple_nonzero(stride)
dilate = _triple_nonzero(dilation)
dtype = dtype_promotion(inp, weight)
if inp.dtype != dtype:
inp = inp.astype(dtype)
if weight.dtype != dtype:
weight = weight.astype(dtype)
op = builtin.Convolution3DBackwardData(
pad_d=pad[D],
pad_h=pad[H],
pad_w=pad[W],
stride_d=stride[D],
stride_h=stride[H],
stride_w=stride[W],
dilate_d=dilate[D],
dilate_h=dilate[H],
dilate_w=dilate[W],
strategy=get_execution_strategy(),
)
(output,) = apply(op, weight, inp)
if bias is not None:
output += bias
return output
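# A minimal usage sketch for conv_transpose3d (not part of the library). It assumes
# ``megengine.tensor`` is importable as shown; the weight layout simply follows the
# docstring above: (in_channels, out_channels, depth, height, width).
def _demo_conv_transpose3d():
    import numpy as np
    from megengine import tensor

    inp = tensor(np.random.random((1, 4, 5, 5, 5)).astype("float32"))     # NCDHW input
    weight = tensor(np.random.random((4, 8, 3, 3, 3)).astype("float32"))  # (in, out, D, H, W)
    # Upsample the 5x5x5 volume with a stride-2 transposed convolution.
    return conv_transpose3d(inp, weight, stride=2)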
def max_pool2d(
inp: Tensor,
kernel_size: Union[int, Tuple[int, int]],
stride: Optional[Union[int, Tuple[int, int]]] = None,
padding: Union[int, Tuple[int, int]] = 0,
) -> Tensor:
r"""Applies a 2D max pooling over an input tensor.
Refer to :class:`~.MaxPool2d` for more information.
Args:
inp: input tensor.
kernel_size: size of the window.
stride: stride of the window. If not provided, its value is set to kernel_size.
Default: None
padding: implicit zero padding added on both sides. Default: 0
Returns:
output tensor.
"""
if stride is None:
stride = kernel_size
window_h, window_w = _pair_nonzero(kernel_size)
stride_h, stride_w = _pair_nonzero(stride)
padding_h, padding_w = _pair(padding)
op = builtin.Pooling(
window_h=window_h,
window_w=window_w,
stride_h=stride_h,
stride_w=stride_w,
pad_h=padding_h,
pad_w=padding_w,
mode="max",
)
(output,) = apply(op, inp)
return output
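# A minimal usage sketch for max_pool2d (illustration only); it assumes the standard
# NCHW layout used throughout this module.
def _demo_max_pool2d():
    import numpy as np
    from megengine import tensor

    x = tensor(np.arange(16, dtype="float32").reshape(1, 1, 4, 4))
    # A 2x2 window with stride 2 halves each spatial dimension: (1, 1, 4, 4) -> (1, 1, 2, 2).
    return max_pool2d(x, kernel_size=2, stride=2)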
def avg_pool2d(
inp: Tensor,
kernel_size: Union[int, Tuple[int, int]],
stride: Optional[Union[int, Tuple[int, int]]] = None,
padding: Union[int, Tuple[int, int]] = 0,
mode: str = "average_count_exclude_padding",
) -> Tensor:
r"""Applies 2D average pooling over an input tensor.
Refer to :class:`~.AvgPool2d` for more information.
Args:
inp: input tensor.
kernel_size: size of the window.
stride: stride of the window. If not provided, its value is set to ``kernel_size``.
Default: None
padding: implicit zero padding added on both sides. Default: 0
mode: whether padded values are counted in the average. Set to "average" to
include padding in the count.
Default: "average_count_exclude_padding"
Returns:
output tensor.
"""
if stride is None:
stride = kernel_size
window_h, window_w = _pair_nonzero(kernel_size)
stride_h, stride_w = _pair_nonzero(stride)
padding_h, padding_w = _pair(padding)
op = builtin.Pooling(
window_h=window_h,
window_w=window_w,
stride_h=stride_h,
stride_w=stride_w,
pad_h=padding_h,
pad_w=padding_w,
mode=mode,
)
(output,) = apply(op, inp)
return output
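# A small sketch contrasting the two averaging modes documented above; the choice
# only matters when ``padding`` is non-zero (made-up shapes, illustration only).
def _demo_avg_pool2d_modes():
    import numpy as np
    from megengine import tensor

    x = tensor(np.ones((1, 1, 4, 4), dtype="float32"))
    excl = avg_pool2d(x, kernel_size=2, stride=2, padding=1)                  # padding excluded from the count
    incl = avg_pool2d(x, kernel_size=2, stride=2, padding=1, mode="average")  # padding counted
    return excl, incl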
def adaptive_max_pool2d(
inp: Tensor, oshp: Union[Tuple[int, int], int, Tensor],
) -> Tensor:
r"""Applies a 2D max adaptive pooling over an input.
Refer to :class:`~.MaxAdaptivePool2d` for more information.
Args:
inp: input tensor.
oshp: ``(OH, OW)`` size of the output shape.
Returns:
output tensor.
"""
if isinstance(oshp, int):
oshp = (oshp, oshp)
op = builtin.AdaptivePooling(mode="max", format="NCHW",)
oshp = astensor1d(oshp, inp, dtype="int32", device=inp.device)
(output,) = apply(op, inp, oshp)
return output
def adaptive_avg_pool2d(
inp: Tensor, oshp: Union[Tuple[int, int], int, Tensor],
) -> Tensor:
r"""Applies a 2D average adaptive pooling over an input.
Refer to :class:`~.AvgAdaptivePool2d` for more information.
Args:
inp: input tensor.
oshp: ``(OH, OW)`` size of the output shape.
Returns:
output tensor.
"""
if isinstance(oshp, int):
oshp = (oshp, oshp)
op = builtin.AdaptivePooling(mode="average", format="NCHW",)
oshp = astensor1d(oshp, inp, dtype="int32", device=inp.device)
(output,) = apply(op, inp, oshp)
return output
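# A minimal sketch of the adaptive pooling helpers above: ``oshp`` may be a single
# int (square output) or an (OH, OW) tuple, and the input spatial size is arbitrary.
def _demo_adaptive_pooling():
    import numpy as np
    from megengine import tensor

    x = tensor(np.random.random((1, 3, 13, 17)).astype("float32"))
    y_max = adaptive_max_pool2d(x, 4)       # -> (1, 3, 4, 4)
    y_avg = adaptive_avg_pool2d(x, (2, 5))  # -> (1, 3, 2, 5)
    return y_max, y_avg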
def deformable_psroi_pooling(
inp: Tensor,
rois: Tensor,
trans: Tensor,
no_trans: bool,
part_size: int,
pooled_h: int,
pooled_w: int,
sample_per_part: int,
spatial_scale: float,
trans_std: float = 0.1,
):
r"""Deformable PSROI(Position Sensitive Region of Interest) Pooling.
Args:
inp: input feature map.
rois: the rois for feature pooling.
trans: input offset to psroi_pooling.
no_trans: check the phase of DeformablePSROIPooling. False for the
1st phase, True for the 2nd phase.
part_size: part size.
sample_per_part: number of sample points in each part.
pooled_h, pooled_w: height and width of the pooled output.
spatial_scale: | |
<reponame>johnhany/MegEngine
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import collections
import functools
import itertools
from typing import Union
import numpy as np
import megengine._internal as mgb
from .graph import _use_default_if_none, get_default_graph
def wrap_io_tensor(func):
r"""A wrapper to make ``func`` compatible with functions in ``_internal.opr``.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
comp_graph = None
for i in itertools.chain(args, kwargs.values()):
if isinstance(i, Tensor) and i._comp_graph:
comp_graph = i._comp_graph
break
else:
comp_graph = get_default_graph()
new_args = (
arg._attach(comp_graph) if isinstance(arg, Tensor) else arg for arg in args
)
new_kwargs = {
k: v._attach(comp_graph) if isinstance(v, Tensor) else v
for k, v in kwargs.items()
}
ret = func(*new_args, **new_kwargs)
if isinstance(ret, mgb.SymbolVar):
ret = Tensor(ret)
elif isinstance(ret, list):
ret = [Tensor(t) if isinstance(t, mgb.SymbolVar) else t for t in ret]
elif isinstance(ret, tuple):
ret = tuple(Tensor(t) if isinstance(t, mgb.SymbolVar) else t for t in ret)
return ret
return wrapper
def _wrap_symbolvar_binary_op(f):
@functools.wraps(f)
def wrapped(self, other):
comp_graph = (
isinstance(other, Tensor)
and other._comp_graph
or self._comp_graph
or get_default_graph()
)
if isinstance(other, Tensor):
other = other._attach(comp_graph)
return Tensor(f(self._attach(comp_graph), other))
return wrapped
def wrap_slice(inp):
start = inp.start._symvar if isinstance(inp.start, Tensor) else inp.start
stop = inp.stop._symvar if isinstance(inp.stop, Tensor) else inp.stop
step = inp.step._symvar if isinstance(inp.step, Tensor) else inp.step
return slice(start, stop, step)
def wrap_idx(idx):
if not isinstance(idx, tuple):
idx = (idx,)
idx = tuple(i._symvar if isinstance(i, Tensor) else i for i in idx)
idx = tuple(wrap_slice(i) if isinstance(i, slice) else i for i in idx)
return idx
class MGBIndexWrapper:
def __init__(self, dest, mgb_index, val=None):
self.dest = dest
self.val = val
self.mgb_index = mgb_index
def __getitem__(self, idx):
if self.val is None:
return wrap_io_tensor(self.mgb_index(self.dest._symvar).__getitem__)(
wrap_idx(idx)
)
else:
return wrap_io_tensor(
self.mgb_index(self.dest._symvar, self.val._symvar).__getitem__
)(wrap_idx(idx))
class Tensor:
r"""The main data container in MegEngine.
Use :func:`~.tensor` to create a Tensor with existing data.
"""
requires_grad = False
grad = None
def __init__(self, val=None, *, requires_grad=None):
self._reset(val, requires_grad=requires_grad)
def _reset(self, val=None, *, requires_grad=None):
if val is None:
self.__val = None
self.__sym = None
elif isinstance(val, mgb.SharedND):
self.__val = val
self.__sym = None
elif isinstance(val, mgb.SymbolVar):
self.__val = None
self.__sym = val
else:
raise TypeError("must be initialized with SymbolVar or SharedND")
self.requires_grad = requires_grad
def _as_tensor(self, obj):
r"""Convert the data into a ``Tensor``. If the data is already a Tensor
with the same dtype and device, no copy will be performed. Otherwise a
new Tensor will be returned with computational graph retained.
"""
if isinstance(obj, Tensor):
return obj
if isinstance(obj, mgb.SymbolVar):
return Tensor(obj)
if isinstance(obj, mgb.SharedScalar):
return Tensor(obj._as_sym_var(self._comp_graph, self._comp_node))
return tensor(data=obj, device=self.device)
def numpy(self):
r"""Return the tensor value in numpy.ndarray format.
"""
if self.__val is not None:
assert self.__sym is None
return self.__val.get_value()
if self.__sym is None:
raise ValueError("uninitialized")
if self.__sym.eager_val is not None:
return self.__sym.eager_val.get_value()
return self.__sym.inferred_value
def item(self):
return self.numpy().item()
def _attach(self, comp_graph, *, volatile=True):
if self.__val:
return self.__val.symvar(comp_graph, volatile=volatile)
if self.__sym:
if self.__sym.owner_graph != comp_graph:
raise RuntimeError("internal error")
return self.__sym
else:
raise ValueError("uninitialized")
@property
def _symvar(self):
if self.__sym:
assert not self.__val
return self.__sym
if not self.__val:
raise ValueError("uninitialized")
return self._attach(get_default_graph())
def __mgb_symvar__(self, comp_graph=None, **_):
if self.__val and comp_graph:
return self._attach(comp_graph)
return self._symvar # read by mgb.opr
@property
def dtype(self):
r"""Return the data type of the tensor.
"""
if self.__val is not None:
return self.__val.dtype
return self._symvar.dtype
@property
def _comp_node(self):
if self.__val is not None:
return self.__val.comp_node
return self._symvar.comp_node
device = _comp_node
@property
def _comp_graph(self):
if self.__sym is not None:
return self.__sym.owner_graph
return None
@property
def shape(self):
r"""Return an int tuple that is the shape/layout of the tensor.
Could be invalid in static graph mode.
"""
from ..jit import trace
if trace._active_instance: # pylint: disable=protected-access
# NOTE: this is a hack
shape = mgb.opr.get_var_shape(self._symvar)
return tuple(Tensor(shape[i]) for i in range(self.ndim))
return self._symvar.imm_shape
def set_value(self, value, *, sync=True, inplace=False, share=False):
r"""Set value to the tensor.
"""
if not self.__val:
raise ValueError("not detached")
if isinstance(value, Tensor):
value = value.__val or value.__sym.eager_val
self.__val.set_value(value, sync=sync, inplace=inplace, share=share)
def fill(self, value):
r"""Fills the tensor with the specified value.
"""
self.set_value(np.full(self.shape, value, dtype=self.dtype))
def reset_zero(self):
r"""Reset the tensor and fills with zeros.
"""
if not self.__val:
raise ValueError("not detached")
self.__val.reset_zero()
def to(self, device):
r"""Performs Tensor device conversion, returns Tensor with the specified device.
"""
return wrap_io_tensor(mgb.opr.copy)(self, comp_node=device)
# https://docs.python.org/3/reference/datamodel.html#object.__hash__
# > If a class does not define an __eq__() method it should not define a
# > __hash__() operation either
__hash__ = None # type: ignore[assignment]
def __eq__(self, rhs):
rhs = self._as_tensor(rhs)
return Tensor(self._symvar._binary_opr("EQ", rhs._symvar))
def __ne__(self, rhs):
return 1 - self.__eq__(rhs)
def __len__(self):
if self._symvar.eager_val is not None:
return self._symvar.eager_val.shape[0]
raise TypeError(
"__len__ and __iter__ are not available for tensors on a non-eager graph."
)
__add__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__add__)
__radd__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__radd__)
__sub__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__sub__)
__rsub__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rsub__)
__mul__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__mul__)
__rmul__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rmul__)
__matmul__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__matmul__)
__rmatmul__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rmatmul__)
__lshift__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__lshift__)
__rshift__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rshift__)
__truediv__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__truediv__)
__rtruediv__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rtruediv__)
__floordiv__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__floordiv__)
__rfloordiv__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rfloordiv__)
__mod__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__mod__)
__rmod__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rmod__)
__pow__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__pow__)
__rpow__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rpow__)
__lt__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__lt__)
__gt__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__gt__)
__le__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__le__)
__ge__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__ge__)
__neg__ = wrap_io_tensor(mgb.SymbolVar.__neg__)
sum = wrap_io_tensor(mgb.SymbolVar.sum)
"""
Return the sum of the elements of the given tensor.
"""
max = wrap_io_tensor(mgb.SymbolVar.max)
"""
Return the maximum value of the given tensor.
"""
min = wrap_io_tensor(mgb.SymbolVar.min)
"""
Return the minimum value of the given tensor.
"""
prod = wrap_io_tensor(mgb.SymbolVar.prod)
"""
Return the product of the elements of the given tensor.
"""
mean = wrap_io_tensor(mgb.SymbolVar.mean)
"""
Return the mean value of the given tensor.
"""
dimshuffle = wrap_io_tensor(mgb.SymbolVar.dimshuffle)
"""
See more details in :func:`~.functional.tensor.dimshuffle`.
"""
astype = wrap_io_tensor(mgb.SymbolVar.astype)
"""
Cast the tensor to a specified type.
"""
def reshape(self, *target_shape):
r"""Return a tensor which has the given target shape.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4,4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1,16))
out = out.reshape(inp.shape)
print(out.numpy())
.. testoutput::
[[100 101 102 103]
[104 105 106 107]
[108 109 110 111]
[112 113 114 115]]
"""
if isinstance(target_shape[0], tuple):
if len(target_shape) > 1:
raise ValueError("Only single tuple is accepted in reshape")
target_shape = target_shape[0]
target_shape = (t._symvar if isinstance(t, Tensor) else t for t in target_shape)
return Tensor(mgb.SymbolVar.reshape(self._symvar, *target_shape))
def broadcast(self, *target_shape):
r"""Return a tensor broadcast from the current tensor to the given target shape.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
data = tensor(np.arange(100, 104, dtype=np.int32).reshape(1,4))
data = data.broadcast((4,4))
print(data.numpy())
.. testoutput::
[[100 101 102 103]
[100 101 102 103]
[100 101 102 103]
[100 101 102 103]]
"""
if isinstance(target_shape[0], tuple):
if len(target_shape) > 1:
raise ValueError("Only single tuple is accepted in broadcast")
target_shape = target_shape[0]
target_shape = (t._symvar if isinstance(t, Tensor) else t for t in target_shape)
return Tensor(mgb.SymbolVar.broadcast(self._symvar, *target_shape))
# Prefer operators on Tensor instead of convert to numpy
__array_priority__ = 1000
# mgb indexing family
def __getitem__(self, idx):
return wrap_io_tensor(self._symvar.__getitem__)(wrap_idx(idx))
def set_subtensor(self, val):
return MGBIndexWrapper(self, mgb.opr.set_subtensor, val)
def incr_subtensor(self, val):
return MGBIndexWrapper(self, mgb.opr.incr_subtensor, val)
@property
def ai(self):
return MGBIndexWrapper(self, mgb.opr.advanced_indexing)
def set_ai(self, val):
return MGBIndexWrapper(self, mgb.opr.set_advanced_indexing, val)
def incr_ai(self, val):
return MGBIndexWrapper(self, mgb.opr.incr_advanced_indexing, val)
@property
def mi(self):
return MGBIndexWrapper(self, mgb.opr.mesh_indexing)
def set_mi(self, val):
return MGBIndexWrapper(self, mgb.opr.set_mesh_indexing, val)
def incr_mi(self, val):
return MGBIndexWrapper(self, mgb.opr.incr_mesh_indexing, val)
@property
def batched_mi(self):
return MGBIndexWrapper(self, mgb.opr.batched_mesh_indexing)
def batched_set_mi(self, val):
return MGBIndexWrapper(self, mgb.opr.batched_set_mesh_indexing, val)
def batched_incr_mi(self, val):
return MGBIndexWrapper(self, mgb.opr.batched_incr_mesh_indexing, val)
def __array__(self, dtype=None):
if dtype is None:
return self.numpy()
else:
return self.numpy().astype(dtype, copy=False)
def __int__(self):
return int(self.item())
def __index__(self):
return int(self.item())
def __round__(self, ndigits=0):
if ndigits != 0:
raise ValueError("ndigits must be 0 for Tensor.round")
return Tensor(mgb.opr.elemwise([self._symvar], mode="ROUND"))
round = __round__
def sqrt(self):
r"""Return a tensor that each element is the square root of its
original value.
"""
return Tensor(mgb.opr.sqrt(self._symvar))
def shapeof(self, axis=None):
r"""Return a Tensor that represent the shape of the tensor.
"""
return Tensor(mgb.opr.get_var_shape(self._symvar, axis=axis))
@property
def | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 2 12:11:07 2017
@author: leo
Take table data and infer which values represent missing data, so that they can
be replaced by standard missing-data markers.
Examples of missing data that can be found:
- 'XXX'
- 'NO_ADDR'
- '999999'
- 'NONE'
- '-'
TODO:
- For probable missing values, check entire file
"""
import string
import numpy as np
import pandas as pd
DEFAULT_THRESH = 0.6
def mv_from_letter_repetition(top_values, score=0.7):
"""Checks for unusual repetition of characters as in XX or 999999"""
# Compute number of unique characters for each value
num_unique_chars = pd.Series([len(set(list(x))) for x in top_values.index], index=top_values.index)
# Check that we have at least 3 distinct values
if len(num_unique_chars) >= 3:
# Keep as NaN candidate if the value has only 1 unique character and
# at least two characters, while the other values have at least two distinct
# characters
num_unique_chars.sort_values(inplace=True)
if (len(num_unique_chars.index[0]) > 1) \
and (num_unique_chars.iloc[0] == 1) \
and (num_unique_chars.iloc[1] >= 2):
return [(num_unique_chars.index[0], score, 'letter_repeted')]
return []
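# Illustration (made-up values): given top values such as {'75019': 12, '69003': 9,
# 'XXXXX': 7, '31000': 5}, 'XXXXX' is the only entry with a single distinct character
# and more than one character while the others have at least two distinct characters,
# so it is returned as a candidate with score 0.7 and origin 'letter_repeted'.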
def mv_from_usual_forms(top_values, probable_missing_values, score=0.5):
"""Compares top values to common expressions for missing values"""
to_return = []
for val in top_values.index:
if val.lower() in [x.lower() for x in probable_missing_values]:
to_return.append((val, score, 'usual'))
return to_return
def mv_from_len_diff(top_values, score=0.3):
"""Check if all values have the same length except one"""
# Compute lengths of values
lengths = pd.Series([len(x) for x in top_values.index], index=top_values.index)
# Check if all values have the same length except one:
if (lengths.nunique() == 2) & (len(top_values) >= 4): # TODO: why ???
if lengths.value_counts().iloc[-1] == 1:
abnormal_length = lengths.value_counts().index[-1]
mv_value = lengths[lengths == abnormal_length].index[0]
return [(mv_value, score, 'diff')]
return []
def mv_from_len_ratio(top_values, score=0.2):
"""Check if one value is much shorter than others"""
# Compute lengths of values
lengths = pd.Series([len(x) for x in top_values.index], index=top_values.index)
if len(top_values) >= 4:
lengths.sort_values(inplace=True)
length_ratio = 2.9
if length_ratio * lengths.iloc[0] < lengths.iloc[1]:
mv_value = lengths.index[0]
return [(mv_value, score, 'len_ratio')]
return []
def mv_from_not_digit(top_values, score=1):
"""Check if one value is the only not digit (and is only text)"""
is_digit = pd.Series([x.replace(',', '').replace('.', '').isdigit()
for x in top_values.index], index=top_values.index)
if len(top_values) >= 3:
if (~is_digit).sum() == 1:
mv_value = is_digit[is_digit == False].index[0]
if mv_value.isalpha():
return [(mv_value, score/2. + score/2.*(len(top_values) >= 4),
'not_digit')]
return []
def mv_from_punctuation(top_values, score=1):
"""Check if value is only one with only punctuation"""
punct = string.punctuation + ' '
is_punct = pd.Series([all(y in punct for y in x) for x in top_values.index], index=top_values.index)
if (is_punct).sum() == 1:
mv_value = is_punct[is_punct].index[0]
return [(mv_value, score, 'punctuation')]
return []
def mv_from_common_values(all_top_values, score=0.5):
'''Looks for values common in at least two columns'''
# Create dict with: {value: set_of_columns_where_common} with values present in at least two columns
popular_values = dict()
for col_1, top_values_1 in all_top_values.items():
for col_2, top_values_2 in all_top_values.items():
if col_1 != col_2:
common_values = [x for x in top_values_1.index if x in top_values_2.index]
for val in common_values:
if val not in popular_values:
popular_values[val] = set([col_1, col_2])
else:
popular_values[val].add(col_1)
popular_values[val].add(col_2)
if popular_values:
# Questionable heuristic: return value most frequent
temp = [(val, len(cols)) for val, cols in popular_values.items()]
temp.sort(key=lambda x: x[1], reverse=True)
mv_value = temp[0][0]
return [(mv_value, score, 'common_values')]
return []
def mv_from_common_values_2(col_mvs, score=1):
"""
Return mv candidates for missing values that are already candidates in
at least two columns.
"""
# Make dict with key: mv_candidate value: list of columns where applicable
val_mvs = dict()
for col, tuples in col_mvs.items():
for (val, score, origin) in tuples:
if val not in val_mvs:
val_mvs[val] = [col]
else:
val_mvs[val].append(col)
return [(val, score, 'common_values') for val, cols in val_mvs.items() if (len(cols)>=2)]
def compute_all_top_values(tab, num_top_values):
'''
Returns a dict with the columns of the table as keys and, as values, the
``num_top_values`` most frequent values of each column as a pandas Series.
'''
all_top_values = dict()
for col in tab.columns:
all_top_values[col] = tab[col].value_counts(True).head(num_top_values)
return all_top_values
def correct_score(list_of_possible_mvs, probable_mvs):
"""
Aggregates the original scores when the same value is detected by several methods
in the same column (``probable_mvs`` is currently unused here; see mv_from_usual_forms).
INPUT:
list_of_possible_mvs: ex: [(mv, 0.3), (branch, 0.2)]
probable_mvs: ex ['nan', 'none']
OUTPUT:
list_of_possible_mvs: ex[(nan, 0.9), (branch, 0.1)]
"""
# Sum scores for same values detected by different methods in same column
new_list_of_possible_mvs_tmp = dict()
for (val, coef, orig) in list_of_possible_mvs:
if val not in new_list_of_possible_mvs_tmp:
new_list_of_possible_mvs_tmp[val] = dict()
new_list_of_possible_mvs_tmp[val]['score'] = coef
new_list_of_possible_mvs_tmp[val]['origin'] = [orig]
else:
new_list_of_possible_mvs_tmp[val]['score'] += coef
new_list_of_possible_mvs_tmp[val]['origin'].append(orig)
# NB: Taken care of in mv_from_usual_forms
# # If the value is a known form of mv, increase probability
# if val.lower() in [x.lower() for x in probable_mvs]:
# new_list_of_possible_mvs_tmp[val] += 0.5
# Reformat output like input
new_list_of_possible_mvs = []
for val, _dict in new_list_of_possible_mvs_tmp.items():
new_list_of_possible_mvs.append((val, _dict['score'], _dict['origin']))
return new_list_of_possible_mvs
def infer_mvs(tab, params=None):
"""
API MODULE
Run mv inference processes for each column and for the entire table
"""
PROBABLE_MVS = ['nan', 'none', 'na', 'n/a', '\\n', ' ', 'non renseigne', \
'nr', 'no value', 'null', 'missing value']
ALWAYS_MVS = ['']
if params is None:
params = {}
# Set variables and replace by default values
PROBABLE_MVS.extend(params.get('probable_mvs', []))
ALWAYS_MVS.extend(params.get('always_mvs', []))
num_top_values = params.get('num_top_values', 10)
# Compute most frequent values per column
all_top_values = compute_all_top_values(tab, num_top_values)
col_mvs = dict()
# Look at each column and infer mv
for col, top_values in all_top_values.items():
col_mvs[col] = []
if (not top_values.any()) or (top_values.iloc[0] == 1):
continue
col_mvs[col].extend(mv_from_len_diff(top_values))
col_mvs[col].extend(mv_from_len_ratio(top_values))
col_mvs[col].extend(mv_from_not_digit(top_values))
col_mvs[col].extend(mv_from_punctuation(top_values))
col_mvs[col].extend(mv_from_usual_forms(top_values, PROBABLE_MVS))
col_mvs[col].extend(mv_from_usual_forms(top_values, ALWAYS_MVS, 10**3))
col_mvs[col].extend(mv_from_letter_repetition(top_values))
col_mvs[col] = correct_score(col_mvs[col], PROBABLE_MVS)
col_mvs[col].sort(key=lambda x: x[1], reverse=True)
# Transfer output to satisfy API standards
def triplet_to_dict(val):
return {'val': val[0], 'score': val[1], 'origin': val[2]}
common_mvs = [triplet_to_dict(val) for val in mv_from_common_values_2(col_mvs)]
columns_mvs = {key:[triplet_to_dict(val) for val in vals] for key, vals in col_mvs.items()}
infered_mvs = {'columns': columns_mvs, 'all': common_mvs}
return {'mvs_dict': infered_mvs, 'thresh': 0.6} # TODO: remove hardcoded threshold
def replace_mvs(tab, params):
"""
API MODULE
Replace the values that should be mvs by actual np.nan. Values in 'all'
will be replaced in the entire table whereas values in 'columns' will only
be replaced in the specified columns.
INPUT:
tab: pandas DataFrame to modify
params:
mvs_dict: dict indicating mv values with scores. For example:
{
'all': [],
'columns': {'dech': [('-', 2.0, 'unknown')],
'distance': [('-', 1, 'unknown')]}
}
thresh: minimum score to remove mvs
OUTPUT:
tab: same table with values replaced by np.nan
modified: Indicate if value was modified
"""
# Set variables and replace by default values
mvs_dict = params['mvs_dict']
thresh = params.get('thresh', DEFAULT_THRESH)
# Replace
assert sorted(list(mvs_dict.keys())) == ['all', 'columns']
# Run information
modified = pd.DataFrame(False, index=tab.index, columns=tab.columns)
for mv in mvs_dict['all']:
val, score = mv['val'], mv['score']
# run_info['replace_num']['all'][val] = 0
if score >= thresh:
# Metrics
modified = modified | (tab == val)
# Do transformation
tab.replace(val, np.nan, inplace=True)
for col, mv_values in mvs_dict['columns'].items():
for mv in mv_values:
val, score = mv['val'], mv['score']
if score >= thresh:
# Metrics
if tab[col].notnull().any():
modified[col] = modified[col] | (tab[col] == val)
# Do transformation
tab[col].replace(val, np.nan, inplace=True)
return tab, modified
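# A minimal end-to-end sketch of the two API modules above. The sample table is made
# up for illustration; which candidates survive depends on the heuristics' scores
# crossing the threshold returned by infer_mvs.
def _demo_infer_and_replace_mvs():
    tab = pd.DataFrame({'city': ['Paris', 'Lyon', 'XXX', 'Nice', 'XXX', 'Lille'],
                        'dist': ['12', '7', '-', '3', '9', '4']})
    params = infer_mvs(tab)               # {'mvs_dict': {...}, 'thresh': 0.6}
    cleaned, modified = replace_mvs(tab, params)
    return cleaned, modified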
def sample_mvs_ilocs(tab, params, sample_params):
'''
Selects interesting rows to display following inference
INPUT:
- tab: the pandas DataFrame on which inference was performed
- params: the result of infer_mvs
- sample_params:
- randomize: (default: True)
- num_per_missing_val_to_display: for each missing value found,
how many examples to display
OUTPUT:
- row_idxs: index values of rows to display
'''
# Select rows to display based on result
num_per_missing_val_to_display = sample_params.get('num_per_missing_val_to_display', 4)
randomize = sample_params.get('randomize', True)
thresh = params.get('thresh', DEFAULT_THRESH)
# TODO: add for ALL
row_idxs = []
for col, mvs in params['mvs_dict']['columns'].items():
if col in tab.columns:
for mv in mvs:
if mv['score'] >= thresh:
sel = (tab[col] == mv['val']).astype(int).diff().fillna(1).astype(bool)
sel.index = range(len(sel))
row_idxs.extend(list(sel[sel].index)[:num_per_missing_val_to_display])
for mv in params['mvs_dict']['all']:
if mv['score'] >= thresh:
sel = (tab == mv['val']).any(1).diff().fillna(True)
sel.index = range(len(sel))
if randomize:
new_indexes = np.random.permutation(list(sel[sel].index))[:num_per_missing_val_to_display]
else:
new_indexes = list(sel[sel].index)[:num_per_missing_val_to_display]
row_idxs.extend(new_indexes)
return row_idxs
if __name__ == '__main__':
file_paths = ['../../data/test_dedupe/participants.csv',
'../../data/test/etablissements/bce_data_norm.csv',
'local_test_data/source.csv',
'local_test_data/emmanuel_1/equipe.csv',
'local_test_data/emmanuel_1/doctorale.csv',
'local_test_data/emmanuel_1/laboratoire.csv',
'local_test_data/integration_4/hal2.csv']
file_path = file_paths[-1] # Path to file to test
| |
# Copyright (c) 2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of generally useful calculation tools."""
import functools
import warnings
import numpy as np
import numpy.ma as ma
from scipy.spatial import cKDTree
from . import height_to_pressure_std, pressure_to_height_std
from ..package_tools import Exporter
from ..units import check_units, units
exporter = Exporter(globals())
@exporter.export
def resample_nn_1d(a, centers):
"""Return one-dimensional nearest-neighbor indexes based on user-specified centers.
Parameters
----------
a : array-like
1-dimensional array of numeric values from which to
extract indexes of nearest-neighbors
centers : array-like
1-dimensional array of numeric values representing a subset of values to approximate
Returns
-------
An array of indexes representing values closest to given array values
"""
ix = []
for center in centers:
index = (np.abs(a - center)).argmin()
if index not in ix:
ix.append(index)
return ix
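# A small usage sketch (not from the original module): with a = [1, 2, 3, 4, 5] and
# centers = [2.2, 4.9], the nearest values are 2 and 5, so the returned indexes are [1, 4].
def _demo_resample_nn_1d():
    a = np.array([1., 2., 3., 4., 5.])
    return resample_nn_1d(a, [2.2, 4.9])  # [1, 4]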
@exporter.export
def nearest_intersection_idx(a, b):
"""Determine the index of the point just before two lines with common x values.
Parameters
----------
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
Returns
-------
An array of indexes representing the index of the values
just before the intersection(s) of the two lines.
"""
# Difference in the two y-value sets
difference = a - b
# Determine the point just before the intersection of the lines
# Will return multiple points for multiple intersections
sign_change_idx, = np.nonzero(np.diff(np.sign(difference)))
return sign_change_idx
@exporter.export
@units.wraps(('=A', '=B'), ('=A', '=B', '=B'))
def find_intersections(x, a, b, direction='all'):
"""Calculate the best estimate of intersection.
Calculates the best estimates of the intersection of two y-value
data sets that share a common x-value set.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
direction : string, optional
specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
or 'decreasing' (b becoming greater than a). Defaults to 'all'.
Returns
-------
A tuple (x, y) of array-like with the x and y coordinates of the
intersections of the lines.
"""
# Find the index of the points just before the intersection(s)
nearest_idx = nearest_intersection_idx(a, b)
next_idx = nearest_idx + 1
# Determine the sign of the change
sign_change = np.sign(a[next_idx] - b[next_idx])
# x-values around each intersection
_, x0 = _next_non_masked_element(x, nearest_idx)
_, x1 = _next_non_masked_element(x, next_idx)
# y-values around each intersection for the first line
_, a0 = _next_non_masked_element(a, nearest_idx)
_, a1 = _next_non_masked_element(a, next_idx)
# y-values around each intersection for the second line
_, b0 = _next_non_masked_element(b, nearest_idx)
_, b1 = _next_non_masked_element(b, next_idx)
# Calculate the x-intersection. This comes from finding the equations of the two lines,
# one through (x0, a0) and (x1, a1) and the other through (x0, b0) and (x1, b1),
# finding their intersection, and reducing with a bunch of algebra.
delta_y0 = a0 - b0
delta_y1 = a1 - b1
intersect_x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0)
# Calculate the y-intersection of the lines. Just plug the x above into the equation
# for the line through the a points. One could solve for y like x above, but this
# causes weirder unit behavior and seems a little less good numerically.
intersect_y = ((intersect_x - x0) / (x1 - x0)) * (a1 - a0) + a0
# If there's no intersections, return
if len(intersect_x) == 0:
return intersect_x, intersect_y
# Check for duplicates
duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)
# Make a mask based on the direction of sign change desired
if direction == 'increasing':
mask = sign_change > 0
elif direction == 'decreasing':
mask = sign_change < 0
elif direction == 'all':
return intersect_x[duplicate_mask], intersect_y[duplicate_mask]
else:
raise ValueError('Unknown option for direction: {0}'.format(str(direction)))
return intersect_x[mask & duplicate_mask], intersect_y[mask & duplicate_mask]
@exporter.export
def interpolate_nans(x, y, kind='linear'):
"""Interpolate NaN values in y.
Interpolate NaN values in the y dimension. Works with unsorted x values.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
y : array-like
1-dimensional array of numeric y-values
kind : string
specifies the kind of interpolation x coordinate - 'linear' or 'log', optional.
Defaults to 'linear'.
Returns
-------
An array of the y coordinate data with NaN values interpolated.
"""
x_sort_args = np.argsort(x)
x = x[x_sort_args]
y = y[x_sort_args]
nans = np.isnan(y)
if kind == 'linear':
y[nans] = np.interp(x[nans], x[~nans], y[~nans])
elif kind == 'log':
y[nans] = np.interp(np.log(x[nans]), np.log(x[~nans]), y[~nans])
else:
raise ValueError('Unknown option for kind: {0}'.format(str(kind)))
return y[x_sort_args]
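# A small usage sketch (not from the original module): a single interior NaN is filled
# by linear interpolation between its neighbours (here 1 and 3, giving 2).
def _demo_interpolate_nans():
    x = np.array([1., 2., 3., 4.])
    y = np.array([1., np.nan, 3., 4.])
    return interpolate_nans(x, y)  # array([1., 2., 3., 4.])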
def _next_non_masked_element(a, idx):
"""Return the next non masked element of a masked array.
If an array is masked, return the next non-masked element (if the given index is masked).
If no other unmasked points are after the given masked point, returns none.
Parameters
----------
a : array-like
1-dimensional array of numeric values
idx : integer
index of requested element
Returns
-------
Index of next non-masked element and next non-masked element
"""
try:
next_idx = idx + a[idx:].mask.argmin()
if ma.is_masked(a[next_idx]):
return None, None
else:
return next_idx, a[next_idx]
except (AttributeError, TypeError, IndexError):
return idx, a[idx]
def delete_masked_points(*arrs):
"""Delete masked points from arrays.
Takes arrays and removes masked points to help with calculations and plotting.
Parameters
----------
arrs : one or more array-like
source arrays
Returns
-------
arrs : one or more array-like
arrays with masked elements removed
"""
if any(hasattr(a, 'mask') for a in arrs):
keep = ~functools.reduce(np.logical_or, (np.ma.getmaskarray(a) for a in arrs))
return tuple(ma.asarray(a[keep]) for a in arrs)
else:
return arrs
@exporter.export
def reduce_point_density(points, radius, priority=None):
r"""Return a mask to reduce the density of points in irregularly-spaced data.
This function is used to down-sample a collection of scattered points (e.g. surface
data), returning a mask that can be used to select the points from one or more arrays
(e.g. arrays of temperature and dew point). The points selected can be controlled by
providing an array of ``priority`` values (e.g. rainfall totals to ensure that
stations with higher precipitation remain in the mask).
Parameters
----------
points : (N, K) array-like
N locations of the points in K dimensional space
radius : float
minimum radius allowed between points
priority : (N, K) array-like, optional
If given, this should have the same shape as ``points``; these values will
be used to control selection priority for points.
Returns
-------
(N,) array-like of boolean values indicating whether points should be kept. This
can be used directly to index numpy arrays to return only the desired points.
Examples
--------
>>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.)
array([ True, False, True], dtype=bool)
>>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.,
... priority=np.array([0.1, 0.9, 0.3]))
array([False, True, False], dtype=bool)
"""
# Handle 1D input
if points.ndim < 2:
points = points.reshape(-1, 1)
# Make a kd-tree to speed searching of data.
tree = cKDTree(points)
# Need to use sorted indices rather than sorting the position
# so that the keep mask matches *original* order.
if priority is not None:
# Need to sort the locations in decreasing priority.
sorted_indices = np.argsort(priority)[::-1]
else:
# Take advantage of iterator nature of range here to avoid making big lists
sorted_indices = range(len(points))
# Keep all points initially
keep = np.ones(len(points), dtype=np.bool)
# Loop over all the potential points
for ind in sorted_indices:
# Only proceed if we haven't already excluded this point
if keep[ind]:
# Find the neighbors and eliminate them
neighbors = tree.query_ball_point(points[ind], radius)
keep[neighbors] = False
# We just removed ourselves, so undo that
keep[ind] = True
return keep
def _get_bound_pressure_height(pressure, bound, heights=None, interpolate=True):
"""Calculate the bounding pressure and height in a layer.
Given pressure, optional heights, and a bound, return either the closest pressure/height
or interpolated pressure/height. If no heights are provided, a standard atmosphere is
assumed.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressures
bound : `pint.Quantity`
Bound to retrieve (in pressure or height)
heights : `pint.Quantity`, optional
Atmospheric heights associated with the pressure levels. Defaults to using
heights calculated from ``pressure`` assuming a standard atmosphere.
interpolate : boolean, | |
max(len(x[0]) for x in s)+1
# Output for an entry in ``s`` of ("Year", "2016") with a ``max_len`` of 10
# would be: '= Year .....: 2016'
def line(k, v):
return f"{k.ljust(max_len, '.')}: {v}"
s = [line(*x) for x in s.items()]
# Now we can reuse ``max_len`` to mean the longest fully formatted line
# We want to add '= ' to the left side and ' =' to the right side to
# form a border
max_len = max(len(x) for x in s)
s = [f'= {x:{max_len}} =' for x in s]
max_len += 4
s = [" ALBUM INFORMATION ".center(max_len, "=")] + s + ["=" * max_len]
return "\n".join(s)
def GetOutputFilename(self, directory=None):
"""If an explicit filename was provided to command line arguments,
and ``_MergeWithArgs`` was called, typically via a subclass
constructor, then that filename is returned.
Otherwise a name of the form
Artist - Date - Title (Version) (Disc Disc#) \
DiscName (Label IssueDate Medium Channels).wav
is generated, where the parentheses are included only if any
part inside is non-empty. The Medium is blank if it is "CD".
Channels is not filled in if it is "2.0". Invalid filename
characters are removed or replaced with dashes.
"""
if self.forced_filename:
logging.debug('Forced filename or pre-computed file name = %s', self.filename)
return self.filename
tags = dict()
# Base tag
tags['base'] = f"{self['ARTIST']} - {self['DATE_RECORDED']} - {self['TITLE']}"
# Setup version subinfo
tags['version'] = f" ({self['VERSION']})" if self["VERSION"] else ""
# Setup label / release subinfo
channels = self.channels if self.channels != '2.0' else ''
if self["ORIGINAL_MEDIUM"] == "CD":
labeltag = f"{self['LABEL']} {self['ISSUE_DATE']} {channels}"
else:
labeltag = f"{self['LABEL']} {self['ISSUE_DATE']} {self['ORIGINAL_MEDIUM']} {channels}"
labeltag = labeltag.strip()
tags['label'] = labeltag and f" ({labeltag})"
# Setup disc tag
if self["PART_NUMBER"]:
disctag = f" (Disc {self['PART_NUMBER']}) {self['DISC_NAME']}"
else:
disctag = f" {self['DISC_NAME']}"
tags['disc'] = disctag.rstrip()
# Merge into filename
filename = f"{tags['base']}{tags['version']}{tags['disc']}{tags['label']}{ext.WAV}"
# Replace invalid characters with either a dash or remove them
filename = re.compile("[<>:/\\\\]").sub("-", filename)
filename = re.compile("[|?*]").sub("", filename)
# Replace invalid double quotes with valid single quotes
filename = filename.replace('"', "'")
if directory:
return os.path.join(directory, filename)
return filename
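# Illustrative (made-up) result of the naming scheme above: an album tagged
# ARTIST="Holst", DATE_RECORDED="1976", TITLE="The Planets", LABEL="EMI",
# ISSUE_DATE="2015", ORIGINAL_MEDIUM="CD", PART_NUMBER="2", with 2.0 channels and
# no VERSION or DISC_NAME, would come out as
# "Holst - 1976 - The Planets (Disc 2) (EMI 2015).wav".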
def PrintMetadata(self):
"""Formats and prints the metadata using the string
representation and adding in track-level details.
"""
def PrintTrack(trackno, track):
output = [f"File {str(trackno + 1).zfill(2)}:"]
with IgnoreKeyError:
output.append(f"Disc {track['disc']}")
with IgnoreKeyError:
output.append(f"Side {track['side']}")
output.append(f"Track {track['track'].ljust(2)}")
with IgnoreKeyError:
output.append(f"Phase {track['phase']}")
with IgnoreKeyError:
output.append(f"Subindex {track['subindex']}")
output.append(f"Time {track['start_time']}")
output.append(f'"{track["title"]}"')
with IgnoreKeyError:
output[-1] = f'{output[-1][:-1]}: {track["subtitle"]}"'
print(' '.join(output))
print(self)
for trackno, track in enumerate(self.tracks):
PrintTrack(trackno, track)
filename = self.GetOutputFilename().replace(ext.WAV, ext.MKA)
print("Filename:", filename)
def Confirm(self):
"""Prints out metadata using ``PrintMetadata`` then asks
the user if they want to continue. Any answer starting
with 'n' or 'N' is interpreted as "no" while any other
answer is interpreted as "yes".
Returns:
bool: True for user-approved merging, False for cancel
"""
self.PrintMetadata()
answer = input("Continue [Y/n]? ").lower()
return not answer.startswith("n")
def _GetAlbumLevelMetadata(files):
# Obtain album-level tags from the first file
# Assumption is these tags are the same for every file
tag = mutagen.flac.FLAC(files[0])
# These tags are copied directly
directmap = ["ARTIST", "GENRE", "LABEL", "ISSUE_DATE", "VERSION",
"ORIGINAL_MEDIUM", "DISC_NAME", "PHASE_NAME"]
# These are renamed
mapping = {"TITLE": "ALBUM", "DATE_RECORDED": "DATE"}
mapping.update({k: k for k in directmap})
result = {}
for rkey, tkey in mapping.items():
if tkey in tag:
logging.debug("Found key %s, %s with value %s", rkey, tkey, tag[tkey][0])
# ``mutagen.flac.FLAC`` behaves like a ``dict[str, list[str]]``
result[rkey] = tag[tkey][0]
return result
class AlbumMetadata(Metadata):
"""Metadata class holding information for a collection of FLAC files
from a single disc. Searches through the metadata tags in each FLAC
file to determine information about them, then builds up the ``Metadata``
structure.
Supported disc-level FLAC tags:
ARTIST, TITLE, DATE, GENRE, LABEL, ISSUE_DATE, VERSION,
ORIGINAL_MEDIUM, DISC_NAME, DISCTOTAL, DISCNUMBER, PHASE_NAME
Supported track-level FLAC tags:
TITLE, TRACKNUMBER, SIDE, SUBINDEX, SUBTITLE, PHASE
NB: DISCNUMBER is ignored if DISCTOTAL is not present, and vice versa,
or if both are '1'.
See parent class ``Metadata`` for more information.
"""
@property
def sumparts(self):
return True
def _initialize(self, args):
# First check for multidisc mode; after this we can assume that
# ``args.multidisc`` is ``False``
if args.multidisc:
raise ValueError("Cannot use 'AlbumMetadata' in multidisc mode, use 'MultidiscMetadata'")
# Pull in album level data
self.data.update(_GetAlbumLevelMetadata(self.source))
# Pull disc number from tags if both fields exist, but skip if disc 1/1
tag = self._GetTag()
if "DISCTOTAL" in tag and "DISCNUMBER" in tag:
discs = int(tag["DISCTOTAL"][0])
if discs > 1:
self["PART_NUMBER"] = tag["DISCNUMBER"][0]
self.discs = discs
# Pull track-level info: title, subindex, subtitle, start time, phase, side
mka_time = FLACTime()
for f in sorted(self.source):
if self.GetOutputFilename() in f.replace(ext.FLAC, ext.WAV):
continue
tag = mutagen.flac.FLAC(f)
try:
track = {"title": tag["TITLE"][0],
"track": tag["TRACKNUMBER"][0],
"start_time": mka_time.MKACode()}
except KeyError as key:
raise TagNotFoundError(f"{f} doesn't contain key {key}")
for t in ["SIDE", "SUBTITLE", "SUBINDEX", "PHASE"]:
with IgnoreKeyError:
track[t.lower()] = tag[t][0]
self.tracks.append(track)
mka_time += tag.info.length
def _GetTag(self):
return mutagen.flac.FLAC(self.source[0])
def GetDisc(track_info):
try:
return f"{track_info['disc']}{track_info['side']}"
except KeyError:
return track_info['disc']
class MultidiscMetadata(Metadata):
"""Metadata class holding information for a collection of FLAC files
from multiple discs. Searches through the metadata tags in each FLAC
file to determine information about them, then builds up the ``Metadata``
structure.
Supported collection-level FLAC tags (``self.data``):
DISCTOTAL, ARTIST, ALBUM TITLE, DATE, GENRE, LABEL,
ISSUE_DATE, ORIGINAL_MEDIUM, VERSION, PHASE_NAME
Supported disc-level FLAC tags (``self.disc_data``):
DISC_NAME, (Number of tracks is calculated automatically)
Supported track-level FLAC tags (``self.tracks``):
TITLE, TRACKNUMBER, SIDE, DISCNUMBER, SUBINDEX, SUBTITLE, PHASE
See parent class ``Metadata`` for more information.
"""
def __init__(self, source, args):
self.disc_data = {}
super().__init__(source, args)
@property
def sumparts(self):
return False
def _initialize(self, args):
logging.debug('tools.flac.metadata.MultidiscMetadata._initialize')
# First check for multidisc mode; after this we can assume that
# ``args.multidisc`` is ``True``
if not args.multidisc:
raise ValueError("Cannot use 'MultidiscMetadata' in non-multidisc mode, use 'AlbumMetadata'")
# Pull in album level data, handling "DISC_NAME" at the
# disc level, rather than the collection level
data = _GetAlbumLevelMetadata(self.source)
with IgnoreKeyError:
del data["DISC_NAME"]
self.data.update(data)
# Now pull track and disc-level information which varies from file to file
# Track level: track title, subtitle/subindex, start time, disc number
# side, and phase
# Disc level: disc name
mka_time = FLACTime()
for f in sorted(self.source):
if self.GetOutputFilename() in f.replace(ext.FLAC, ext.WAV):
continue
tag = mutagen.flac.FLAC(f)
try:
track = {"title": tag["TITLE"][0],
"track": tag["TRACKNUMBER"][0],
"start_time": mka_time.MKACode()}
except KeyError as key:
raise TagNotFoundError(f"{f} doesn't contain key {key}")
tags = {"disc": "DISCNUMBER",
"subindex": "SUBINDEX",
"subtitle": "SUBTITLE",
"side": "SIDE",
"phase": "PHASE"}
for skey, tkey in tags.items():
with IgnoreKeyError:
track[skey] = tag[tkey][0]
if GetDisc(track) not in self.disc_data:
with IgnoreKeyError:
self.disc_data[GetDisc(track)] = tag["DISC_NAME"][0]
self.tracks.append(track)
mka_time += tag.info.length
def _GetTag(self):
return mutagen.flac.FLAC(self.source[0])
def PrintMetadata(self):
Metadata.PrintMetadata(self)
for disc, name in sorted(self.disc_data.items()):
print(f"Disc {disc} Name: {name}")
class CueMetadata(Metadata):
"""Class holding the metadata information obtained from a CUE sheet file.
Searches through the CUE sheet to obtain certain tags, though it only
supports CUE sheets which describe one file. Multiple files can be handled
via ``AlbumMetadata`` provided the files are tagged correctly.
Supported top-level tags:
FILE, PERFORMER, TITLE
Supported top-level remarks, starting with 'REM':
DATE, DISC, DISCS, GENRE, ISSUE_DATE, LABEL, VERSION,
ORIGINAL_MEDIUM, DISC_NAME
Can either be called directly via ``CueMetadata(cue_sheet_name, args)``
where args is an optional parameter, but suggested, or via
``GetMetadata(cue_sheet_name, args)`` so long as cue_sheet_name is a string
that ends with '.cue'.
See the parent class ``Metadata`` for more information.
"""
@property
def sumparts(self):
return True
def _initialize(self, args):
with open(self.source) as cue:
lines = cue.readlines()
for i, line in enumerate(lines):
if line.startswith("FILE"):
self.filename = CueMetadata.ExtractFilename(line).replace(ext.WAV, ext.FLAC)
elif line.startswith("REM DISCS"):
# This needs to come before 'REM DISC' otherwise it would be
# captured by 'REM DISC' instead. Also it's not stored in the
# dictionary component of ``self``.
self.discs = int(CueMetadata.ExtractProperty(line, "REM DISCS"))
elif line.startswith(" TRACK"):
self.tracks.append(CueMetadata.ExtractTrackInformation(lines[i:]))
elif not line.startswith(" "): # Search for additional top-level tags
remarks = ["GENRE", "ISSUE_DATE", "LABEL", "VERSION", "ORIGINAL_MEDIUM", "DISC_NAME"]
remarks = {f"REM {t}": t for t in remarks}
# Note that ``"REM DISC "`` has a space at the end because | |
# -*- coding: utf-8; -*-
"""Axially moving solid, Eulerian view, small-displacement regime (on top of uniform axial motion).
Three alternative formulations are provided, both in dynamic and in steady-state cases:
- `EulerianSolid`, `SteadyStateEulerianSolid`:
Straightforward Eulerian description. Variables are `u(x, t)`, `v(x, t) := ∂u/∂t`, and `σ(x, t)`.
`v` is the Eulerian rate of `u`.
- `EulerianSolidAlternative`, `SteadyStateEulerianSolidAlternative`:
Eulerian description using material parcel velocity. Variables are `u(x, t)`, `v(x, t) := du/dt`,
and `σ(x, t)`.
`v` is the material derivative of `u`; it is the actual physical velocity of the material
parcels with respect to the co-moving frame.
Note `v` is still an Eulerian field; it is a spatial description of a material quantity!
- `EulerianSolidPrimal`, `SteadyStateEulerianSolidPrimal`:
Eulerian description using material parcel velocity and primal variables only. Variables are
`u(x, t)`, and `v(x, t) := du/dt`.
`v` is the material derivative of `u`; it is the actual physical velocity of the material
parcels with respect to the co-moving frame.
Note `v` is still an Eulerian field; it is a spatial description of a material quantity!
This is the cleanest formulation, and the fastest solver.
**NOTE**:
Of the steady-state solvers, currently only `SteadyStateEulerianSolidPrimal` converges to the
correct solution; this is still something to be investigated later. For now, if you want the
steady state, just use `SteadyStateEulerianSolidPrimal`.
All three dynamic solvers work as expected.
"""
__all__ = ["EulerianSolid",
"SteadyStateEulerianSolid", # does not work yet
"EulerianSolidAlternative",
"SteadyStateEulerianSolidAlternative", # does not work yet
"EulerianSolidPrimal",
"SteadyStateEulerianSolidPrimal",
"step_adaptive"]
from contextlib import contextmanager
import typing
from fenics import (VectorFunctionSpace, TensorFunctionSpace,
MixedElement, FunctionSpace, TrialFunctions, TestFunctions, split, FunctionAssigner, project,
TrialFunction, TestFunction,
Constant, Expression, Function,
FacetNormal, DirichletBC,
dot, inner, outer, sym, tr,
nabla_grad, div, dx, ds,
Identity,
lhs, rhs, assemble, solve,
interpolate, VectorSpaceBasis, as_backend_type,
norm,
begin, end)
from ..meshfunction import meshsize, cell_mf_to_expression
from .numutil import ε, mag, advw, advs
from .util import ufl_constant_property, StabilizerFlags
def null_space_fields(dim):
"""Set up null space for removal in the Krylov solver.
Return a `list` of rigid-body modes of geometric dimension `dim` as FEniCS
expressions. These can then be projected into the correct finite element space.
Null space of the linear momentum balance is {u: ε(u) = 0 and ∇·u = 0}
This consists of rigid-body translations and infinitesimal rigid-body rotations.
Strictly, this is the null space of linear elasticity, but the physics shouldn't
be that much different for the other linear models.
See:
https://fenicsproject.discourse.group/t/rotation-in-null-space-for-elasticity/4083
https://bitbucket.org/fenics-project/dolfin/src/946dbd3e268dc20c64778eb5b734941ca5c343e5/python/demo/undocumented/elasticity/demo_elasticity.py#lines-35:52
https://bitbucket.org/fenics-project/dolfin/issues/587/functionassigner-does-not-always-call
"""
if dim == 1:
fus = [Constant(1)]
elif dim == 2:
fus = [Constant((1, 0)),
Constant((0, 1)),
Expression(("x[1]", "-x[0]"), degree=1)] # around z axis (clockwise)
elif dim == 3:
fus = [Constant((1, 0, 0)),
Constant((0, 1, 0)),
Constant((0, 0, 1)),
Expression(("0", "x[2]", "-x[1]"), degree=1), # around x axis (clockwise)
Expression(("-x[2]", "0", "x[0]"), degree=1), # around y axis (clockwise)
Expression(("x[1]", "-x[0]", "0"), degree=1)] # around z axis (clockwise)
else:
raise NotImplementedError(f"dim = {dim}")
return fus
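# A hedged sketch of how the fields above are typically consumed, modelled on the
# dolfin elasticity demo referenced in the docstring (not on code in this module):
# project each rigid-body mode into the displacement space, orthonormalize, and
# attach the basis to the assembled matrix as a near-null space for the Krylov solver.
def _demo_attach_null_space(A, V):
    fus = null_space_fields(V.mesh().geometry().dim())
    basis = VectorSpaceBasis([project(fu, V).vector() for fu in fus])
    basis.orthonormalize()
    as_backend_type(A).set_near_nullspace(basis)
    return basis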
class EulerianSolidStabilizerFlags(StabilizerFlags):
"""Interface for numerical stabilizer on/off flags.
Collects them into one namespace; handles translation between
`bool` values and the UFL expressions that are actually used
in the equations.
Usage::
print(solver.stabilizers) # status --> "<EulerianSolidStabilizerFlags: SUPG(True)>"
solver.stabilizers.SUPG = True # enable SUPG
solver.stabilizers.SUPG = False # disable SUPG
"""
def __init__(self): # set up the UFL expressions for the flags
super().__init__()
self._SUPG = Expression('b', degree=0, b=1.0)
def _get_SUPG(self) -> bool:
return bool(self._SUPG.b)
def _set_SUPG(self, b: bool) -> None:
self._SUPG.b = float(b)
SUPG = property(fget=_get_SUPG, fset=_set_SUPG, doc="Streamline upwinding Petrov-Galerkin, for advection-dominant problems.")
# TODO: use nondimensional form
class EulerianSolid:
"""Axially moving linear solid, small-displacement Eulerian formulation.
For now, this solver provides the linear elastic and Kelvin-Voigt models.
The spatial discretization is based on a mixed formulation. For linear
viscoelastic models, the axial motion introduces a third derivative in the
strong form of the primal formulation. Therefore, the primal formulation
requires C1 elements (not available in FEniCS, for mathematical reasons
outlined in Kirby & Mitchell, 2019; this was done manually for an axially
moving sheet in Kurki et al., 2016).
The alternative, chosen here, is a mixed formulation where both `u` and `σ`
appear as unknowns. The additional derivative from the axial motion then
appears as a spatial derivative of ε in the constitutive equation for σ.
Time integration is performed using the θ method; Crank-Nicolson by default.
`V`: vector function space for displacement
`Q`: tensor function space for stress
`P`: tensor function space for strain projection
Strains are L2-projected into `P` before using them in the constitutive
law. Improves stability in Kelvin-Voigt in the presence of axial motion.
`ρ`: density [kg / m³]
`λ`: Lamé's first parameter [Pa]
`μ`: shear modulus [Pa]
`τ`: Kelvin-Voigt retardation time [s].
Defined as `τ := η / E`, where `E` is Young's modulus [Pa], and
`η` is the viscous modulus [Pa s].
**CAUTION**: The initial value of `τ` passed in to the constructor
determines the material model.
If you pass in `τ=...` with a nonzero value, the solver will
set up the PDEs for the Kelvin-Voigt model.
If you pass in `τ=0`, the solver will set up the PDEs for the
linear elastic model.
Setting the value of `τ` later (`solver.τ = ...`) does **not**
affect which model is in use; so if needed, you can perform
studies with Kelvin-Voigt where `τ` changes quasistatically.
To force a model change later, set the new value of `τ` first,
and then call the `compile_forms` method to refresh the PDEs.
`V0`: velocity of co-moving frame in +x direction (constant) [m/s]
`bcv`: Dirichlet boundary conditions for Eulerian displacement rate ∂u/∂t.
The displacement `u` takes no BCs; use an initial condition instead.
`bcσ`: Dirichlet boundary conditions for stress (NOTE: must set only n·σ).
An alternative to prescribing `v` on that part of the boundary.
`dt`: timestep [s]
`θ`: theta-parameter for the time integrator, θ ∈ [0, 1].
Default 0.5 is Crank-Nicolson; 0 is forward Euler, 1 is backward Euler.
Note that for θ = 0, the SUPG stabilization parameter τ_SUPG → 0,
so when using forward Euler, it does not make sense to enable the
SUPG stabilizer.
As the mesh, we use `V.mesh()`; both `V` and `Q` must be defined on the same mesh.
For LBB-condition-related reasons, the space `Q` must be much larger than `V`; both
{V=Q1, Q=Q2} and {V=Q1, Q=Q3} have been tested to work and to yield similar results.
Near-term future plans (when I next have time to work on this project) include
extending this to support SLS (the standard linear solid); this should be a fairly
minor modification, just replacing the equation for `σ` by a PDE; no infra changes.
Far-future plans include a viscoplastic model of the Chaboche family (of which a
Lagrangean formulation is available in the Julia package `Materials.jl`).
**Equations**:
The formulation used by `EulerianSolid` is perhaps the most straightforward
Eulerian formulation for an axially moving continuum. The momentum balance
for a continuum is
ρ dV/dt - ∇·σ = ρ b
where `V` is the material parcel velocity in an inertial frame of our
choice (the law is postulated to be Galilean invariant).
Let us choose the laboratory frame. We have
dV/dt ≈ [∂²u/∂t² + 2 (a·∇) ∂u/∂t + (a·∇)(a·∇) u]
Thus the Eulerian description of the momentum balance becomes
ρ ∂²u/∂t² + 2 ρ (a·∇) ∂u/∂t + ρ (a·∇)(a·∇)u - ∇·σ = ρ b
where, for Kelvin-Voigt (linear elastic as special case τ = 0):
σ = E : ε + η : dε/dt
= E : (symm ∇u) + η : d/dt (symm ∇u)
= E : (symm ∇) u + η : d/dt (symm ∇) u
= E : (symm ∇) u + η : (symm ∇) du/dt
= E : (symm ∇) u + η : (symm ∇) (∂u/∂t + (a·∇) u)
= E : (symm ∇) u + η : [(symm ∇) v + (symm ∇) (a·∇) u]
= E : (symm ∇) u + τ E : [(symm ∇) v + (symm ∇) (a·∇) u]
where `v := ∂u/∂t` is the Eulerian displacement rate and we have used `η = τ E`.
"""
Reserve(TNEGraph self, int const & Nodes, int const & Edges)
Parameters:
Nodes: int const &
Edges: int const &
"""
return _snap.TNEGraph_Reserve(self, *args)
def Defrag(self, OnlyNodeLinks=False):
"""
Defrag(TNEGraph self, bool const & OnlyNodeLinks=False)
Parameters:
OnlyNodeLinks: bool const &
Defrag(TNEGraph self)
Parameters:
self: TNEGraph *
"""
return _snap.TNEGraph_Defrag(self, OnlyNodeLinks)
def IsOk(self, ThrowExcept=True):
"""
IsOk(TNEGraph self, bool const & ThrowExcept=True) -> bool
Parameters:
ThrowExcept: bool const &
IsOk(TNEGraph self) -> bool
Parameters:
self: TNEGraph const *
"""
return _snap.TNEGraph_IsOk(self, ThrowExcept)
def Dump(self, *args):
"""
Dump(TNEGraph self, FILE * OutF=stdout)
Parameters:
OutF: FILE *
Dump(TNEGraph self)
Parameters:
self: TNEGraph const *
"""
return _snap.TNEGraph_Dump(self, *args)
__swig_destroy__ = _snap.delete_TNEGraph
TNEGraph.Save = new_instancemethod(_snap.TNEGraph_Save,None,TNEGraph)
TNEGraph.HasFlag = new_instancemethod(_snap.TNEGraph_HasFlag,None,TNEGraph)
TNEGraph.GetNodes = new_instancemethod(_snap.TNEGraph_GetNodes,None,TNEGraph)
TNEGraph.AddNode = new_instancemethod(_snap.TNEGraph_AddNode,None,TNEGraph)
TNEGraph.DelNode = new_instancemethod(_snap.TNEGraph_DelNode,None,TNEGraph)
TNEGraph.IsNode = new_instancemethod(_snap.TNEGraph_IsNode,None,TNEGraph)
TNEGraph.BegNI = new_instancemethod(_snap.TNEGraph_BegNI,None,TNEGraph)
TNEGraph.EndNI = new_instancemethod(_snap.TNEGraph_EndNI,None,TNEGraph)
TNEGraph.GetNI = new_instancemethod(_snap.TNEGraph_GetNI,None,TNEGraph)
TNEGraph.GetMxNId = new_instancemethod(_snap.TNEGraph_GetMxNId,None,TNEGraph)
TNEGraph.GetEdges = new_instancemethod(_snap.TNEGraph_GetEdges,None,TNEGraph)
TNEGraph.AddEdge = new_instancemethod(_snap.TNEGraph_AddEdge,None,TNEGraph)
TNEGraph.DelEdge = new_instancemethod(_snap.TNEGraph_DelEdge,None,TNEGraph)
TNEGraph.IsEdge = new_instancemethod(_snap.TNEGraph_IsEdge,None,TNEGraph)
TNEGraph.GetEId = new_instancemethod(_snap.TNEGraph_GetEId,None,TNEGraph)
TNEGraph.BegEI = new_instancemethod(_snap.TNEGraph_BegEI,None,TNEGraph)
TNEGraph.EndEI = new_instancemethod(_snap.TNEGraph_EndEI,None,TNEGraph)
TNEGraph.GetEI = new_instancemethod(_snap.TNEGraph_GetEI,None,TNEGraph)
TNEGraph.GetRndNId = new_instancemethod(_snap.TNEGraph_GetRndNId,None,TNEGraph)
TNEGraph.GetRndNI = new_instancemethod(_snap.TNEGraph_GetRndNI,None,TNEGraph)
TNEGraph.GetRndEId = new_instancemethod(_snap.TNEGraph_GetRndEId,None,TNEGraph)
TNEGraph.GetRndEI = new_instancemethod(_snap.TNEGraph_GetRndEI,None,TNEGraph)
TNEGraph.GetNIdV = new_instancemethod(_snap.TNEGraph_GetNIdV,None,TNEGraph)
TNEGraph.GetEIdV = new_instancemethod(_snap.TNEGraph_GetEIdV,None,TNEGraph)
TNEGraph.Empty = new_instancemethod(_snap.TNEGraph_Empty,None,TNEGraph)
TNEGraph.Clr = new_instancemethod(_snap.TNEGraph_Clr,None,TNEGraph)
TNEGraph.Reserve = new_instancemethod(_snap.TNEGraph_Reserve,None,TNEGraph)
TNEGraph.Defrag = new_instancemethod(_snap.TNEGraph_Defrag,None,TNEGraph)
TNEGraph.IsOk = new_instancemethod(_snap.TNEGraph_IsOk,None,TNEGraph)
TNEGraph.Dump = new_instancemethod(_snap.TNEGraph_Dump,None,TNEGraph)
TNEGraph_swigregister = _snap.TNEGraph_swigregister
TNEGraph_swigregister(TNEGraph)
def TNEGraph_New(*args):
"""
New() -> PNEGraph
TNEGraph_New(int const & Nodes, int const & Edges) -> PNEGraph
Parameters:
Nodes: int const &
Edges: int const &
"""
return _snap.TNEGraph_New(*args)
def TNEGraph_Load(*args):
"""
TNEGraph_Load(TSIn SIn) -> PNEGraph
Parameters:
SIn: TSIn &
"""
return _snap.TNEGraph_Load(*args)
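# --- Hedged usage sketch (not generated by SWIG) ---------------------------
# Minimal illustration of the directed multigraph wrapper above. It assumes
# that the smart pointer returned by New() forwards method calls to the
# underlying TNEGraph, as in the standard SNAP Python tutorials; method names
# follow the generated docstrings. Defined for illustration only, never called.
def _demo_TNEGraph_usage():
    G = TNEGraph.New()
    for nid in (1, 2, 3):
        G.AddNode(nid)
    e1 = G.AddEdge(1, 2)       # returns the new edge id
    e2 = G.AddEdge(1, 2)       # multigraph: a second, parallel (1, 2) edge
    G.AddEdge(2, 3)
    return G.GetNodes(), G.GetEdges()   # expected (3, 3)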
class TBPGraph(object):
"""Proxy of C++ TBPGraph class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
bgsUndef = _snap.TBPGraph_bgsUndef
bgsLeft = _snap.TBPGraph_bgsLeft
bgsRight = _snap.TBPGraph_bgsRight
bgsBoth = _snap.TBPGraph_bgsBoth
def __init__(self, *args):
"""
__init__(TBPGraph self) -> TBPGraph
__init__(TBPGraph self, int const & Nodes, int const & Edges) -> TBPGraph
Parameters:
Nodes: int const &
Edges: int const &
__init__(TBPGraph self, TBPGraph BPGraph) -> TBPGraph
Parameters:
BPGraph: TBPGraph const &
__init__(TBPGraph self, TSIn SIn) -> TBPGraph
Parameters:
SIn: TSIn &
"""
_snap.TBPGraph_swiginit(self,_snap.new_TBPGraph(*args))
def Save(self, *args):
"""
Save(TBPGraph self, TSOut SOut)
Parameters:
SOut: TSOut &
"""
return _snap.TBPGraph_Save(self, *args)
def New(*args):
"""
New() -> PBPGraph
New(int const & Nodes, int const & Edges) -> PBPGraph
Parameters:
Nodes: int const &
Edges: int const &
"""
return _snap.TBPGraph_New(*args)
New = staticmethod(New)
def Load(*args):
"""
Load(TSIn SIn) -> PBPGraph
Parameters:
SIn: TSIn &
"""
return _snap.TBPGraph_Load(*args)
Load = staticmethod(Load)
def GetNodes(self):
"""
GetNodes(TBPGraph self) -> int
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_GetNodes(self)
def GetLNodes(self):
"""
GetLNodes(TBPGraph self) -> int
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_GetLNodes(self)
def GetRNodes(self):
"""
GetRNodes(TBPGraph self) -> int
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_GetRNodes(self)
def AddNode(self, *args):
"""
AddNode(TBPGraph self, int NId=-1, bool const & LeftNode=True) -> int
Parameters:
NId: int
LeftNode: bool const &
AddNode(TBPGraph self, int NId=-1) -> int
Parameters:
NId: int
AddNode(TBPGraph self) -> int
AddNode(TBPGraph self, TBPGraph::TNodeI const & NodeI) -> int
Parameters:
NodeI: TBPGraph::TNodeI const &
"""
return _snap.TBPGraph_AddNode(self, *args)
def DelNode(self, *args):
"""
DelNode(TBPGraph self, int const & NId)
Parameters:
NId: int const &
DelNode(TBPGraph self, TBPGraph::TNode const & NodeI)
Parameters:
NodeI: TBPGraph::TNode const &
"""
return _snap.TBPGraph_DelNode(self, *args)
def IsNode(self, *args):
"""
IsNode(TBPGraph self, int const & NId) -> bool
Parameters:
NId: int const &
"""
return _snap.TBPGraph_IsNode(self, *args)
def IsLNode(self, *args):
"""
IsLNode(TBPGraph self, int const & NId) -> bool
Parameters:
NId: int const &
"""
return _snap.TBPGraph_IsLNode(self, *args)
def IsRNode(self, *args):
"""
IsRNode(TBPGraph self, int const & NId) -> bool
Parameters:
NId: int const &
"""
return _snap.TBPGraph_IsRNode(self, *args)
def GetMxNId(self):
"""
GetMxNId(TBPGraph self) -> int
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_GetMxNId(self)
def BegNI(self):
"""
BegNI(TBPGraph self) -> TBPGraph::TNodeI
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_BegNI(self)
def EndNI(self):
"""
EndNI(TBPGraph self) -> TBPGraph::TNodeI
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_EndNI(self)
def GetNI(self, *args):
"""
GetNI(TBPGraph self, int const & NId) -> TBPGraph::TNodeI
Parameters:
NId: int const &
"""
return _snap.TBPGraph_GetNI(self, *args)
def BegLNI(self):
"""
BegLNI(TBPGraph self) -> TBPGraph::TNodeI
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_BegLNI(self)
def EndLNI(self):
"""
EndLNI(TBPGraph self) -> TBPGraph::TNodeI
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_EndLNI(self)
def BegRNI(self):
"""
BegRNI(TBPGraph self) -> TBPGraph::TNodeI
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_BegRNI(self)
def EndRNI(self):
"""
EndRNI(TBPGraph self) -> TBPGraph::TNodeI
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_EndRNI(self)
def GetEdges(self):
"""
GetEdges(TBPGraph self) -> int
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_GetEdges(self)
def AddEdge(self, *args):
"""
AddEdge(TBPGraph self, int const & LeftNId, int const & RightNId) -> int
Parameters:
LeftNId: int const &
RightNId: int const &
AddEdge(TBPGraph self, TBPGraph::TEdgeI const & EdgeI) -> int
Parameters:
EdgeI: TBPGraph::TEdgeI const &
"""
return _snap.TBPGraph_AddEdge(self, *args)
def DelEdge(self, *args):
"""
DelEdge(TBPGraph self, int const & LeftNId, int const & RightNId)
Parameters:
LeftNId: int const &
RightNId: int const &
"""
return _snap.TBPGraph_DelEdge(self, *args)
def IsEdge(self, *args):
"""
IsEdge(TBPGraph self, int const & LeftNId, int const & RightNId) -> bool
Parameters:
LeftNId: int const &
RightNId: int const &
"""
return _snap.TBPGraph_IsEdge(self, *args)
def BegEI(self):
"""
BegEI(TBPGraph self) -> TBPGraph::TEdgeI
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_BegEI(self)
def EndEI(self):
"""
EndEI(TBPGraph self) -> TBPGraph::TEdgeI
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_EndEI(self)
def GetEI(self, *args):
"""
GetEI(TBPGraph self, int const & LeftNId, int const & RightNId) -> TBPGraph::TEdgeI
Parameters:
LeftNId: int const &
RightNId: int const &
"""
return _snap.TBPGraph_GetEI(self, *args)
def GetRndNId(self, *args):
"""
GetRndNId(TBPGraph self, TRnd Rnd=Rnd) -> int
Parameters:
Rnd: TRnd &
GetRndNId(TBPGraph self) -> int
Parameters:
self: TBPGraph *
"""
return _snap.TBPGraph_GetRndNId(self, *args)
def GetRndLNId(self, *args):
"""
GetRndLNId(TBPGraph self, TRnd Rnd=Rnd) -> int
Parameters:
Rnd: TRnd &
GetRndLNId(TBPGraph self) -> int
Parameters:
self: TBPGraph *
"""
return _snap.TBPGraph_GetRndLNId(self, *args)
def GetRndRNId(self, *args):
"""
GetRndRNId(TBPGraph self, TRnd Rnd=Rnd) -> int
Parameters:
Rnd: TRnd &
GetRndRNId(TBPGraph self) -> int
Parameters:
self: TBPGraph *
"""
return _snap.TBPGraph_GetRndRNId(self, *args)
def GetRndNI(self, *args):
"""
GetRndNI(TBPGraph self, TRnd Rnd=Rnd) -> TBPGraph::TNodeI
Parameters:
Rnd: TRnd &
GetRndNI(TBPGraph self) -> TBPGraph::TNodeI
Parameters:
self: TBPGraph *
"""
return _snap.TBPGraph_GetRndNI(self, *args)
def GetNIdV(self, *args):
"""
GetNIdV(TBPGraph self, TIntV NIdV)
Parameters:
NIdV: TIntV &
"""
return _snap.TBPGraph_GetNIdV(self, *args)
def GetLNIdV(self, *args):
"""
GetLNIdV(TBPGraph self, TIntV NIdV)
Parameters:
NIdV: TIntV &
"""
return _snap.TBPGraph_GetLNIdV(self, *args)
def GetRNIdV(self, *args):
"""
GetRNIdV(TBPGraph self, TIntV NIdV)
Parameters:
NIdV: TIntV &
"""
return _snap.TBPGraph_GetRNIdV(self, *args)
def Empty(self):
"""
Empty(TBPGraph self) -> bool
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_Empty(self)
def Clr(self):
"""
Clr(TBPGraph self)
Parameters:
self: TBPGraph *
"""
return _snap.TBPGraph_Clr(self)
def Reserve(self, *args):
"""
Reserve(TBPGraph self, int const & Nodes, int const & Edges)
Parameters:
Nodes: int const &
Edges: int const &
"""
return _snap.TBPGraph_Reserve(self, *args)
def Defrag(self, OnlyNodeLinks=False):
"""
Defrag(TBPGraph self, bool const & OnlyNodeLinks=False)
Parameters:
OnlyNodeLinks: bool const &
Defrag(TBPGraph self)
Parameters:
self: TBPGraph *
"""
return _snap.TBPGraph_Defrag(self, OnlyNodeLinks)
def IsOk(self, ThrowExcept=True):
"""
IsOk(TBPGraph self, bool const & ThrowExcept=True) -> bool
Parameters:
ThrowExcept: bool const &
IsOk(TBPGraph self) -> bool
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_IsOk(self, ThrowExcept)
def Dump(self, *args):
"""
Dump(TBPGraph self, FILE * OutF=stdout)
Parameters:
OutF: FILE *
Dump(TBPGraph self)
Parameters:
self: TBPGraph const *
"""
return _snap.TBPGraph_Dump(self, *args)
def GetSmallGraph():
"""GetSmallGraph() -> PBPGraph"""
return _snap.TBPGraph_GetSmallGraph()
GetSmallGraph = staticmethod(GetSmallGraph)
__swig_destroy__ = _snap.delete_TBPGraph
TBPGraph.Save = new_instancemethod(_snap.TBPGraph_Save,None,TBPGraph)
TBPGraph.GetNodes = new_instancemethod(_snap.TBPGraph_GetNodes,None,TBPGraph)
TBPGraph.GetLNodes = new_instancemethod(_snap.TBPGraph_GetLNodes,None,TBPGraph)
TBPGraph.GetRNodes = new_instancemethod(_snap.TBPGraph_GetRNodes,None,TBPGraph)
TBPGraph.AddNode = new_instancemethod(_snap.TBPGraph_AddNode,None,TBPGraph)
TBPGraph.DelNode = new_instancemethod(_snap.TBPGraph_DelNode,None,TBPGraph)
TBPGraph.IsNode = new_instancemethod(_snap.TBPGraph_IsNode,None,TBPGraph)
TBPGraph.IsLNode = new_instancemethod(_snap.TBPGraph_IsLNode,None,TBPGraph)
TBPGraph.IsRNode = new_instancemethod(_snap.TBPGraph_IsRNode,None,TBPGraph)
TBPGraph.GetMxNId = new_instancemethod(_snap.TBPGraph_GetMxNId,None,TBPGraph)
TBPGraph.BegNI = new_instancemethod(_snap.TBPGraph_BegNI,None,TBPGraph)
TBPGraph.EndNI = new_instancemethod(_snap.TBPGraph_EndNI,None,TBPGraph)
TBPGraph.GetNI = new_instancemethod(_snap.TBPGraph_GetNI,None,TBPGraph)
TBPGraph.BegLNI = new_instancemethod(_snap.TBPGraph_BegLNI,None,TBPGraph)
TBPGraph.EndLNI = new_instancemethod(_snap.TBPGraph_EndLNI,None,TBPGraph)
TBPGraph.BegRNI = new_instancemethod(_snap.TBPGraph_BegRNI,None,TBPGraph)
TBPGraph.EndRNI = new_instancemethod(_snap.TBPGraph_EndRNI,None,TBPGraph)
TBPGraph.GetEdges = new_instancemethod(_snap.TBPGraph_GetEdges,None,TBPGraph)
TBPGraph.AddEdge = new_instancemethod(_snap.TBPGraph_AddEdge,None,TBPGraph)
TBPGraph.DelEdge = new_instancemethod(_snap.TBPGraph_DelEdge,None,TBPGraph)
TBPGraph.IsEdge = new_instancemethod(_snap.TBPGraph_IsEdge,None,TBPGraph)
TBPGraph.BegEI = new_instancemethod(_snap.TBPGraph_BegEI,None,TBPGraph)
TBPGraph.EndEI = new_instancemethod(_snap.TBPGraph_EndEI,None,TBPGraph)
TBPGraph.GetEI = new_instancemethod(_snap.TBPGraph_GetEI,None,TBPGraph)
TBPGraph.GetRndNId = new_instancemethod(_snap.TBPGraph_GetRndNId,None,TBPGraph)
TBPGraph.GetRndLNId = new_instancemethod(_snap.TBPGraph_GetRndLNId,None,TBPGraph)
TBPGraph.GetRndRNId = new_instancemethod(_snap.TBPGraph_GetRndRNId,None,TBPGraph)
TBPGraph.GetRndNI = new_instancemethod(_snap.TBPGraph_GetRndNI,None,TBPGraph)
TBPGraph.GetNIdV = new_instancemethod(_snap.TBPGraph_GetNIdV,None,TBPGraph)
TBPGraph.GetLNIdV = new_instancemethod(_snap.TBPGraph_GetLNIdV,None,TBPGraph)
TBPGraph.GetRNIdV = new_instancemethod(_snap.TBPGraph_GetRNIdV,None,TBPGraph)
TBPGraph.Empty = new_instancemethod(_snap.TBPGraph_Empty,None,TBPGraph)
TBPGraph.Clr = new_instancemethod(_snap.TBPGraph_Clr,None,TBPGraph)
TBPGraph.Reserve = new_instancemethod(_snap.TBPGraph_Reserve,None,TBPGraph)
TBPGraph.Defrag = new_instancemethod(_snap.TBPGraph_Defrag,None,TBPGraph)
TBPGraph.IsOk = new_instancemethod(_snap.TBPGraph_IsOk,None,TBPGraph)
TBPGraph.Dump = new_instancemethod(_snap.TBPGraph_Dump,None,TBPGraph)
TBPGraph_swigregister = _snap.TBPGraph_swigregister
TBPGraph_swigregister(TBPGraph)
def TBPGraph_New(*args):
"""
New() -> PBPGraph
TBPGraph_New(int const & Nodes, int const & Edges) -> PBPGraph
Parameters:
Nodes: int const &
Edges: int const &
"""
return _snap.TBPGraph_New(*args)
def TBPGraph_Load(*args):
"""
TBPGraph_Load(TSIn SIn) -> PBPGraph
Parameters:
SIn: TSIn &
"""
return _snap.TBPGraph_Load(*args)
def TBPGraph_GetSmallGraph():
"""TBPGraph_GetSmallGraph() -> PBPGraph"""
return _snap.TBPGraph_GetSmallGraph()
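# --- Hedged usage sketch (not generated by SWIG) ---------------------------
# Minimal illustration of the bipartite graph wrapper above: the second
# argument of AddNode selects the left (True) or right (False) partition, and
# edges connect a left node to a right node. Assumes the smart pointer
# returned by New() forwards method calls as in the SNAP Python tutorials.
# Defined for illustration only, never called.
def _demo_TBPGraph_usage():
    G = TBPGraph.New()
    G.AddNode(1, True)         # left-side node
    G.AddNode(2, True)
    G.AddNode(11, False)       # right-side node
    G.AddEdge(1, 11)
    G.AddEdge(2, 11)
    return G.GetLNodes(), G.GetRNodes(), G.GetEdges()   # expected (2, 1, 2)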
class TNGraphMtx(object):
"""Proxy of C++ TNGraphMtx class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), |